python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/* iptables module for the packet checksum mangling
*
* (C) 2002 by Harald Welte <[email protected]>
* (C) 2010 Red Hat, Inc.
*
* Author: Michael S. Tsirkin <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CHECKSUM.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael S. Tsirkin <[email protected]>");
MODULE_DESCRIPTION("Xtables: checksum modification");
MODULE_ALIAS("ipt_CHECKSUM");
MODULE_ALIAS("ip6t_CHECKSUM");
static unsigned int
checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
skb_checksum_help(skb);
return XT_CONTINUE;
}
static int checksum_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_CHECKSUM_info *einfo = par->targinfo;
const struct ip6t_ip6 *i6 = par->entryinfo;
const struct ipt_ip *i4 = par->entryinfo;
if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
einfo->operation);
return -EINVAL;
}
if (!einfo->operation)
return -EINVAL;
switch (par->family) {
case NFPROTO_IPV4:
if (i4->proto == IPPROTO_UDP &&
(i4->invflags & XT_INV_PROTO) == 0)
return 0;
break;
case NFPROTO_IPV6:
if ((i6->flags & IP6T_F_PROTO) &&
i6->proto == IPPROTO_UDP &&
(i6->invflags & XT_INV_PROTO) == 0)
return 0;
break;
}
pr_warn_once("CHECKSUM should be avoided. If really needed, restrict with \"-p udp\" and only use in OUTPUT\n");
return 0;
}
static struct xt_target checksum_tg_reg __read_mostly = {
.name = "CHECKSUM",
.family = NFPROTO_UNSPEC,
.target = checksum_tg,
.targetsize = sizeof(struct xt_CHECKSUM_info),
.table = "mangle",
.checkentry = checksum_tg_check,
.me = THIS_MODULE,
};
static int __init checksum_tg_init(void)
{
return xt_register_target(&checksum_tg_reg);
}
static void __exit checksum_tg_exit(void)
{
xt_unregister_target(&checksum_tg_reg);
}
module_init(checksum_tg_init);
module_exit(checksum_tg_exit);
| linux-master | net/netfilter/xt_CHECKSUM.c |
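The checkentry hook above rejects any operation bits it does not understand with the `operation & ~XT_CHECKSUM_OP_FILL` test. Below is a minimal userspace sketch of that flag-validation idiom; the OP_* names and the check_ops() helper are hypothetical stand-ins for illustration, not part of the module.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical option bits, for illustration only. */
#define OP_FILL   0x01u
#define OP_VERIFY 0x02u		/* imagine a newer, unsupported option */

#define SUPPORTED_OPS (OP_FILL)

/* Return 0 if all requested bits are supported and non-empty, -1 otherwise. */
static int check_ops(uint8_t operation)
{
	if (operation & ~SUPPORTED_OPS)
		return -1;	/* unknown bit set: refuse the rule */
	if (!operation)
		return -1;	/* nothing requested at all */
	return 0;
}

int main(void)
{
	printf("FILL only:   %d\n", check_ops(OP_FILL));             /* 0  */
	printf("FILL|VERIFY: %d\n", check_ops(OP_FILL | OP_VERIFY)); /* -1 */
	printf("empty:       %d\n", check_ops(0));                   /* -1 */
	return 0;
}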
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
struct nft_dynset {
struct nft_set *set;
struct nft_set_ext_tmpl tmpl;
enum nft_dynset_ops op:8;
u8 sreg_key;
u8 sreg_data;
bool invert;
bool expr;
u8 num_exprs;
u64 timeout;
struct nft_expr *expr_array[NFT_SET_EXPR_MAX];
struct nft_set_binding binding;
};
static int nft_dynset_expr_setup(const struct nft_dynset *priv,
const struct nft_set_ext *ext)
{
struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
struct nft_expr *expr;
int i;
for (i = 0; i < priv->num_exprs; i++) {
expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
if (nft_expr_clone(expr, priv->expr_array[i]) < 0)
return -1;
elem_expr->size += priv->expr_array[i]->ops->size;
}
return 0;
}
static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
struct nft_regs *regs)
{
const struct nft_dynset *priv = nft_expr_priv(expr);
struct nft_set_ext *ext;
u64 timeout;
void *elem;
if (!atomic_add_unless(&set->nelems, 1, set->size))
return NULL;
timeout = priv->timeout ? : set->timeout;
elem = nft_set_elem_init(set, &priv->tmpl,
&regs->data[priv->sreg_key], NULL,
&regs->data[priv->sreg_data],
timeout, 0, GFP_ATOMIC);
if (IS_ERR(elem))
goto err1;
ext = nft_set_elem_ext(set, elem);
if (priv->num_exprs && nft_dynset_expr_setup(priv, ext) < 0)
goto err2;
return elem;
err2:
nft_set_elem_destroy(set, elem, false);
err1:
if (set->size)
atomic_dec(&set->nelems);
return NULL;
}
void nft_dynset_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_dynset *priv = nft_expr_priv(expr);
struct nft_set *set = priv->set;
const struct nft_set_ext *ext;
u64 timeout;
if (priv->op == NFT_DYNSET_OP_DELETE) {
set->ops->delete(set, &regs->data[priv->sreg_key]);
return;
}
if (set->ops->update(set, &regs->data[priv->sreg_key], nft_dynset_new,
expr, regs, &ext)) {
if (priv->op == NFT_DYNSET_OP_UPDATE &&
nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
timeout = priv->timeout ? : set->timeout;
*nft_set_ext_expiration(ext) = get_jiffies_64() + timeout;
}
nft_set_elem_update_expr(ext, regs, pkt);
if (priv->invert)
regs->verdict.code = NFT_BREAK;
return;
}
if (!priv->invert)
regs->verdict.code = NFT_BREAK;
}
static void nft_dynset_ext_add_expr(struct nft_dynset *priv)
{
u8 size = 0;
int i;
for (i = 0; i < priv->num_exprs; i++)
size += priv->expr_array[i]->ops->size;
nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPRESSIONS,
sizeof(struct nft_set_elem_expr) + size);
}
static struct nft_expr *
nft_dynset_expr_alloc(const struct nft_ctx *ctx, const struct nft_set *set,
const struct nlattr *attr, int pos)
{
struct nft_expr *expr;
int err;
expr = nft_set_elem_expr_alloc(ctx, set, attr);
if (IS_ERR(expr))
return expr;
if (set->exprs[pos] && set->exprs[pos]->ops != expr->ops) {
err = -EOPNOTSUPP;
goto err_dynset_expr;
}
return expr;
err_dynset_expr:
nft_expr_destroy(ctx, expr);
return ERR_PTR(err);
}
static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
[NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_DYNSET_SET_ID] = { .type = NLA_U32 },
[NFTA_DYNSET_OP] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 },
[NFTA_DYNSET_SREG_DATA] = { .type = NLA_U32 },
[NFTA_DYNSET_TIMEOUT] = { .type = NLA_U64 },
[NFTA_DYNSET_EXPR] = { .type = NLA_NESTED },
[NFTA_DYNSET_FLAGS] = { .type = NLA_U32 },
[NFTA_DYNSET_EXPRESSIONS] = { .type = NLA_NESTED },
};
static int nft_dynset_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
struct nft_dynset *priv = nft_expr_priv(expr);
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set *set;
u64 timeout;
int err, i;
lockdep_assert_held(&nft_net->commit_mutex);
if (tb[NFTA_DYNSET_SET_NAME] == NULL ||
tb[NFTA_DYNSET_OP] == NULL ||
tb[NFTA_DYNSET_SREG_KEY] == NULL)
return -EINVAL;
if (tb[NFTA_DYNSET_FLAGS]) {
u32 flags = ntohl(nla_get_be32(tb[NFTA_DYNSET_FLAGS]));
if (flags & ~(NFT_DYNSET_F_INV | NFT_DYNSET_F_EXPR))
return -EOPNOTSUPP;
if (flags & NFT_DYNSET_F_INV)
priv->invert = true;
if (flags & NFT_DYNSET_F_EXPR)
priv->expr = true;
}
set = nft_set_lookup_global(ctx->net, ctx->table,
tb[NFTA_DYNSET_SET_NAME],
tb[NFTA_DYNSET_SET_ID], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
if (set->flags & NFT_SET_OBJECT)
return -EOPNOTSUPP;
if (set->ops->update == NULL)
return -EOPNOTSUPP;
if (set->flags & NFT_SET_CONSTANT)
return -EBUSY;
priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP]));
if (priv->op > NFT_DYNSET_OP_DELETE)
return -EOPNOTSUPP;
timeout = 0;
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EOPNOTSUPP;
err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
if (err)
return err;
}
err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_KEY], &priv->sreg_key,
set->klen);
if (err < 0)
return err;
if (tb[NFTA_DYNSET_SREG_DATA] != NULL) {
if (!(set->flags & NFT_SET_MAP))
return -EOPNOTSUPP;
if (set->dtype == NFT_DATA_VERDICT)
return -EOPNOTSUPP;
err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_DATA],
&priv->sreg_data, set->dlen);
if (err < 0)
return err;
} else if (set->flags & NFT_SET_MAP)
return -EINVAL;
if ((tb[NFTA_DYNSET_EXPR] || tb[NFTA_DYNSET_EXPRESSIONS]) &&
!(set->flags & NFT_SET_EVAL))
return -EINVAL;
if (tb[NFTA_DYNSET_EXPR]) {
struct nft_expr *dynset_expr;
dynset_expr = nft_dynset_expr_alloc(ctx, set,
tb[NFTA_DYNSET_EXPR], 0);
if (IS_ERR(dynset_expr))
return PTR_ERR(dynset_expr);
priv->num_exprs++;
priv->expr_array[0] = dynset_expr;
if (set->num_exprs > 1 ||
(set->num_exprs == 1 &&
dynset_expr->ops != set->exprs[0]->ops)) {
err = -EOPNOTSUPP;
goto err_expr_free;
}
} else if (tb[NFTA_DYNSET_EXPRESSIONS]) {
struct nft_expr *dynset_expr;
struct nlattr *tmp;
int left;
if (!priv->expr)
return -EINVAL;
i = 0;
nla_for_each_nested(tmp, tb[NFTA_DYNSET_EXPRESSIONS], left) {
if (i == NFT_SET_EXPR_MAX) {
err = -E2BIG;
goto err_expr_free;
}
if (nla_type(tmp) != NFTA_LIST_ELEM) {
err = -EINVAL;
goto err_expr_free;
}
dynset_expr = nft_dynset_expr_alloc(ctx, set, tmp, i);
if (IS_ERR(dynset_expr)) {
err = PTR_ERR(dynset_expr);
goto err_expr_free;
}
priv->expr_array[i] = dynset_expr;
priv->num_exprs++;
if (set->num_exprs &&
dynset_expr->ops != set->exprs[i]->ops) {
err = -EOPNOTSUPP;
goto err_expr_free;
}
i++;
}
if (set->num_exprs && set->num_exprs != i) {
err = -EOPNOTSUPP;
goto err_expr_free;
}
} else if (set->num_exprs > 0) {
err = nft_set_elem_expr_clone(ctx, set, priv->expr_array);
if (err < 0)
return err;
priv->num_exprs = set->num_exprs;
}
nft_set_ext_prepare(&priv->tmpl);
nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen);
if (set->flags & NFT_SET_MAP)
nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_DATA, set->dlen);
if (priv->num_exprs)
nft_dynset_ext_add_expr(priv);
if (set->flags & NFT_SET_TIMEOUT) {
if (timeout || set->timeout) {
nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
}
}
priv->timeout = timeout;
err = nf_tables_bind_set(ctx, set, &priv->binding);
if (err < 0)
goto err_expr_free;
if (set->size == 0)
set->size = 0xffff;
priv->set = set;
return 0;
err_expr_free:
for (i = 0; i < priv->num_exprs; i++)
nft_expr_destroy(ctx, priv->expr_array[i]);
return err;
}
static void nft_dynset_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_dynset *priv = nft_expr_priv(expr);
nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
}
static void nft_dynset_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_dynset *priv = nft_expr_priv(expr);
nf_tables_activate_set(ctx, priv->set);
}
static void nft_dynset_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_dynset *priv = nft_expr_priv(expr);
int i;
for (i = 0; i < priv->num_exprs; i++)
nft_expr_destroy(ctx, priv->expr_array[i]);
nf_tables_destroy_set(ctx, priv->set);
}
static int nft_dynset_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_dynset *priv = nft_expr_priv(expr);
u32 flags = priv->invert ? NFT_DYNSET_F_INV : 0;
int i;
if (nft_dump_register(skb, NFTA_DYNSET_SREG_KEY, priv->sreg_key))
goto nla_put_failure;
if (priv->set->flags & NFT_SET_MAP &&
nft_dump_register(skb, NFTA_DYNSET_SREG_DATA, priv->sreg_data))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_DYNSET_OP, htonl(priv->op)))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
nf_jiffies64_to_msecs(priv->timeout),
NFTA_DYNSET_PAD))
goto nla_put_failure;
if (priv->set->num_exprs == 0) {
if (priv->num_exprs == 1) {
if (nft_expr_dump(skb, NFTA_DYNSET_EXPR,
priv->expr_array[0], reset))
goto nla_put_failure;
} else if (priv->num_exprs > 1) {
struct nlattr *nest;
nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
if (!nest)
goto nla_put_failure;
for (i = 0; i < priv->num_exprs; i++) {
if (nft_expr_dump(skb, NFTA_LIST_ELEM,
priv->expr_array[i], reset))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
}
if (nla_put_be32(skb, NFTA_DYNSET_FLAGS, htonl(flags)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static const struct nft_expr_ops nft_dynset_ops = {
.type = &nft_dynset_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_dynset)),
.eval = nft_dynset_eval,
.init = nft_dynset_init,
.destroy = nft_dynset_destroy,
.activate = nft_dynset_activate,
.deactivate = nft_dynset_deactivate,
.dump = nft_dynset_dump,
.reduce = NFT_REDUCE_READONLY,
};
struct nft_expr_type nft_dynset_type __read_mostly = {
.name = "dynset",
.ops = &nft_dynset_ops,
.policy = nft_dynset_policy,
.maxattr = NFTA_DYNSET_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_dynset.c |
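nft_dynset_new() above bounds the element count with atomic_add_unless(&set->nelems, 1, set->size). The sketch below shows the same "increment unless already at the limit" pattern rebuilt on C11 atomics, since the kernel primitive is not available in userspace; add_unless() is an illustrative stand-in.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Bounded counter: add 1 unless the counter already equals "limit".
 * Mirrors the effect of atomic_add_unless(&set->nelems, 1, set->size)
 * used above, with C11 atomics standing in for the kernel primitive.
 */
static bool add_unless(atomic_uint *counter, unsigned int limit)
{
	unsigned int old = atomic_load(counter);

	do {
		if (old == limit)
			return false;		/* set is full, refuse */
	} while (!atomic_compare_exchange_weak(counter, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_uint nelems = 2;

	printf("%d\n", add_unless(&nelems, 3));	/* 1: 2 -> 3 */
	printf("%d\n", add_unless(&nelems, 3));	/* 0: already at limit */
	return 0;
}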
// SPDX-License-Identifier: GPL-2.0-only
/* This is a module which is used to mark packets for tracing.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
MODULE_DESCRIPTION("Xtables: packet flow tracing");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_TRACE");
MODULE_ALIAS("ip6t_TRACE");
static int trace_tg_check(const struct xt_tgchk_param *par)
{
return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
}
static void trace_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_logger_put(par->family, NF_LOG_TYPE_LOG);
}
static unsigned int
trace_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
skb->nf_trace = 1;
return XT_CONTINUE;
}
static struct xt_target trace_tg_reg __read_mostly = {
.name = "TRACE",
.revision = 0,
.family = NFPROTO_UNSPEC,
.table = "raw",
.target = trace_tg,
.checkentry = trace_tg_check,
.destroy = trace_tg_destroy,
.me = THIS_MODULE,
};
static int __init trace_tg_init(void)
{
return xt_register_target(&trace_tg_reg);
}
static void __exit trace_tg_exit(void)
{
xt_unregister_target(&trace_tg_reg);
}
module_init(trace_tg_init);
module_exit(trace_tg_exit);
MODULE_SOFTDEP("pre: nf_log_syslog");
| linux-master | net/netfilter/xt_TRACE.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Event cache for netfilter. */
/*
* (C) 2005 Harald Welte <[email protected]>
* (C) 2005 Patrick McHardy <[email protected]>
* (C) 2005-2006 Netfilter Core Team <[email protected]>
* (C) 2005 USAGI/WIDE Project <http://www.linux-ipv6.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>
static DEFINE_MUTEX(nf_ct_ecache_mutex);
#define DYING_NULLS_VAL ((1 << 30) + 1)
#define ECACHE_MAX_JIFFIES msecs_to_jiffies(10)
#define ECACHE_RETRY_JIFFIES msecs_to_jiffies(10)
enum retry_state {
STATE_CONGESTED,
STATE_RESTART,
STATE_DONE,
};
struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
return &cnet->ecache;
}
#if IS_MODULE(CONFIG_NF_CT_NETLINK)
EXPORT_SYMBOL_GPL(nf_conn_pernet_ecache);
#endif
static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
{
unsigned long stop = jiffies + ECACHE_MAX_JIFFIES;
struct hlist_nulls_head evicted_list;
enum retry_state ret = STATE_DONE;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int sent;
INIT_HLIST_NULLS_HEAD(&evicted_list, DYING_NULLS_VAL);
next:
sent = 0;
spin_lock_bh(&cnet->ecache.dying_lock);
hlist_nulls_for_each_entry_safe(h, n, &cnet->ecache.dying_list, hnnode) {
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
/* The worker owns all entries, ct remains valid until nf_ct_put
* in the loop below.
*/
if (nf_conntrack_event(IPCT_DESTROY, ct)) {
ret = STATE_CONGESTED;
break;
}
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &evicted_list);
if (time_after(stop, jiffies)) {
ret = STATE_RESTART;
break;
}
if (sent++ > 16) {
spin_unlock_bh(&cnet->ecache.dying_lock);
cond_resched();
goto next;
}
}
spin_unlock_bh(&cnet->ecache.dying_lock);
hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) {
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
nf_ct_put(ct);
cond_resched();
}
return ret;
}
static void ecache_work(struct work_struct *work)
{
struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net, ecache.dwork.work);
int ret, delay = -1;
ret = ecache_work_evict_list(cnet);
switch (ret) {
case STATE_CONGESTED:
delay = ECACHE_RETRY_JIFFIES;
break;
case STATE_RESTART:
delay = 0;
break;
case STATE_DONE:
break;
}
if (delay >= 0)
schedule_delayed_work(&cnet->ecache.dwork, delay);
}
static int __nf_conntrack_eventmask_report(struct nf_conntrack_ecache *e,
const u32 events,
const u32 missed,
const struct nf_ct_event *item)
{
struct net *net = nf_ct_net(item->ct);
struct nf_ct_event_notifier *notify;
u32 old, want;
int ret;
if (!((events | missed) & e->ctmask))
return 0;
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (!notify) {
rcu_read_unlock();
return 0;
}
ret = notify->ct_event(events | missed, item);
rcu_read_unlock();
if (likely(ret >= 0 && missed == 0))
return 0;
do {
old = READ_ONCE(e->missed);
if (ret < 0)
want = old | events;
else
want = old & ~missed;
} while (cmpxchg(&e->missed, old, want) != old);
return ret;
}
int nf_conntrack_eventmask_report(unsigned int events, struct nf_conn *ct,
u32 portid, int report)
{
struct nf_conntrack_ecache *e;
struct nf_ct_event item;
unsigned int missed;
int ret;
if (!nf_ct_is_confirmed(ct))
return 0;
e = nf_ct_ecache_find(ct);
if (!e)
return 0;
memset(&item, 0, sizeof(item));
item.ct = ct;
item.portid = e->portid ? e->portid : portid;
item.report = report;
/* Is this a resend of a destroy event? If so, skip missed. */
missed = e->portid ? 0 : e->missed;
ret = __nf_conntrack_eventmask_report(e, events, missed, &item);
if (unlikely(ret < 0 && (events & (1 << IPCT_DESTROY)))) {
/* This is a destroy event that has been triggered by a process,
* we store the PORTID to include it in the retransmission.
*/
if (e->portid == 0 && portid != 0)
e->portid = portid;
}
return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
struct nf_conntrack_ecache *e;
struct nf_ct_event item;
unsigned int events;
if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct))
return;
e = nf_ct_ecache_find(ct);
if (e == NULL)
return;
events = xchg(&e->cache, 0);
item.ct = ct;
item.portid = 0;
item.report = 0;
/* We make a copy of the missed event cache without taking
* the lock, thus we may send missed events twice. However,
* this does not harm and it happens very rarely.
*/
__nf_conntrack_eventmask_report(e, events, e->missed, &item);
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp,
u32 portid, int report)
{
struct net *net = nf_ct_exp_net(exp);
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (!notify)
goto out_unlock;
e = nf_ct_ecache_find(exp->master);
if (!e)
goto out_unlock;
if (e->expmask & (1 << event)) {
struct nf_exp_event item = {
.exp = exp,
.portid = portid,
.report = report
};
notify->exp_event(1 << event, &item);
}
out_unlock:
rcu_read_unlock();
}
void nf_conntrack_register_notifier(struct net *net,
const struct nf_ct_event_notifier *new)
{
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
WARN_ON_ONCE(notify);
rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
void nf_conntrack_unregister_notifier(struct net *net)
{
mutex_lock(&nf_ct_ecache_mutex);
RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
/* synchronize_rcu() is called after netns pre_exit */
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
if (state == NFCT_ECACHE_DESTROY_FAIL &&
!delayed_work_pending(&cnet->ecache.dwork)) {
schedule_delayed_work(&cnet->ecache.dwork, HZ);
net->ct.ecache_dwork_pending = true;
} else if (state == NFCT_ECACHE_DESTROY_SENT) {
if (!hlist_nulls_empty(&cnet->ecache.dying_list))
mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
else
net->ct.ecache_dwork_pending = false;
}
}
bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
switch (net->ct.sysctl_events) {
case 0:
/* assignment via template / ruleset? ignore sysctl. */
if (ctmask || expmask)
break;
return true;
case 2: /* autodetect: no event listener, don't allocate extension. */
if (!READ_ONCE(nf_ctnetlink_has_listener))
return true;
fallthrough;
case 1:
/* always allocate an extension. */
if (!ctmask && !expmask) {
ctmask = ~0;
expmask = ~0;
}
break;
default:
WARN_ON_ONCE(1);
return true;
}
e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
if (e) {
e->ctmask = ctmask;
e->expmask = expmask;
}
return e != NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_ecache_ext_add);
#define NF_CT_EVENTS_DEFAULT 2
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
void nf_conntrack_ecache_pernet_init(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
net->ct.sysctl_events = nf_ct_events;
INIT_DELAYED_WORK(&cnet->ecache.dwork, ecache_work);
INIT_HLIST_NULLS_HEAD(&cnet->ecache.dying_list, DYING_NULLS_VAL);
spin_lock_init(&cnet->ecache.dying_lock);
BUILD_BUG_ON(__IPCT_MAX >= 16); /* e->ctmask is u16 */
}
void nf_conntrack_ecache_pernet_fini(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
cancel_delayed_work_sync(&cnet->ecache.dwork);
}
| linux-master | net/netfilter/nf_conntrack_ecache.c |
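__nf_conntrack_eventmask_report() above folds delivery failures into e->missed with a cmpxchg() retry loop, so concurrent updaters never lose bits. The sketch below reproduces that read-modify-write shape with C11 atomics; update_missed() is an illustrative stand-in, not the kernel function.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Lock-free update of a "missed events" bitmask, in the style of the
 * cmpxchg() loop in __nf_conntrack_eventmask_report(): on delivery failure
 * the events are OR-ed in, on success the previously missed bits that were
 * just re-sent are cleared.
 */
static void update_missed(atomic_uint_fast32_t *missed, uint32_t events,
			  uint32_t resent, int delivery_failed)
{
	uint_fast32_t old = atomic_load(missed);
	uint_fast32_t want;

	do {
		if (delivery_failed)
			want = old | events;	/* remember what was lost */
		else
			want = old & ~(uint_fast32_t)resent; /* re-sent bits done */
	} while (!atomic_compare_exchange_weak(missed, &old, want));
}

int main(void)
{
	atomic_uint_fast32_t missed = 0;

	update_missed(&missed, 0x5, 0, 1);	/* delivery failed: record 0x5   */
	update_missed(&missed, 0x2, 0x5, 0);	/* later resend of 0x5 succeeded */
	printf("missed = %#lx\n", (unsigned long)atomic_load(&missed)); /* 0 */
	return 0;
}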
// SPDX-License-Identifier: GPL-2.0-only
/* IP tables module for matching IPsec policy
*
* Copyright (c) 2004,2005 Patrick McHardy, <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/xfrm.h>
#include <linux/netfilter.h>
#include <linux/netfilter/xt_policy.h>
#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("Xtables: IPsec policy match");
MODULE_LICENSE("GPL");
static inline bool
xt_addr_cmp(const union nf_inet_addr *a1, const union nf_inet_addr *m,
const union nf_inet_addr *a2, unsigned short family)
{
switch (family) {
case NFPROTO_IPV4:
return ((a1->ip ^ a2->ip) & m->ip) == 0;
case NFPROTO_IPV6:
return ipv6_masked_addr_cmp(&a1->in6, &m->in6, &a2->in6) == 0;
}
return false;
}
static bool
match_xfrm_state(const struct xfrm_state *x, const struct xt_policy_elem *e,
unsigned short family)
{
#define MATCH_ADDR(x,y,z) (!e->match.x || \
(xt_addr_cmp(&e->x, &e->y, (const union nf_inet_addr *)(z), family) \
^ e->invert.x))
#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
return MATCH_ADDR(saddr, smask, &x->props.saddr) &&
MATCH_ADDR(daddr, dmask, &x->id.daddr) &&
MATCH(proto, x->id.proto) &&
MATCH(mode, x->props.mode) &&
MATCH(spi, x->id.spi) &&
MATCH(reqid, x->props.reqid);
}
static int
match_policy_in(const struct sk_buff *skb, const struct xt_policy_info *info,
unsigned short family)
{
const struct xt_policy_elem *e;
const struct sec_path *sp = skb_sec_path(skb);
int strict = info->flags & XT_POLICY_MATCH_STRICT;
int i, pos;
if (sp == NULL)
return -1;
if (strict && info->len != sp->len)
return 0;
for (i = sp->len - 1; i >= 0; i--) {
pos = strict ? i - sp->len + 1 : 0;
if (pos >= info->len)
return 0;
e = &info->pol[pos];
if (match_xfrm_state(sp->xvec[i], e, family)) {
if (!strict)
return 1;
} else if (strict)
return 0;
}
return strict ? 1 : 0;
}
static int
match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
unsigned short family)
{
const struct xt_policy_elem *e;
const struct dst_entry *dst = skb_dst(skb);
int strict = info->flags & XT_POLICY_MATCH_STRICT;
int i, pos;
if (dst->xfrm == NULL)
return -1;
for (i = 0; dst && dst->xfrm;
dst = ((struct xfrm_dst *)dst)->child, i++) {
pos = strict ? i : 0;
if (pos >= info->len)
return 0;
e = &info->pol[pos];
if (match_xfrm_state(dst->xfrm, e, family)) {
if (!strict)
return 1;
} else if (strict)
return 0;
}
return strict ? i == info->len : 0;
}
static bool
policy_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_policy_info *info = par->matchinfo;
int ret;
if (info->flags & XT_POLICY_MATCH_IN)
ret = match_policy_in(skb, info, xt_family(par));
else
ret = match_policy_out(skb, info, xt_family(par));
if (ret < 0)
ret = info->flags & XT_POLICY_MATCH_NONE ? true : false;
else if (info->flags & XT_POLICY_MATCH_NONE)
ret = false;
return ret;
}
static int policy_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_policy_info *info = par->matchinfo;
const char *errmsg = "neither incoming nor outgoing policy selected";
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT)))
goto err;
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) {
errmsg = "output policy not valid in PREROUTING and INPUT";
goto err;
}
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) {
errmsg = "input policy not valid in POSTROUTING and OUTPUT";
goto err;
}
if (info->len > XT_POLICY_MAX_ELEM) {
errmsg = "too many policy elements";
goto err;
}
return 0;
err:
pr_info_ratelimited("%s\n", errmsg);
return -EINVAL;
}
static struct xt_match policy_mt_reg[] __read_mostly = {
{
.name = "policy",
.family = NFPROTO_IPV4,
.checkentry = policy_mt_check,
.match = policy_mt,
.matchsize = sizeof(struct xt_policy_info),
.me = THIS_MODULE,
},
{
.name = "policy",
.family = NFPROTO_IPV6,
.checkentry = policy_mt_check,
.match = policy_mt,
.matchsize = sizeof(struct xt_policy_info),
.me = THIS_MODULE,
},
};
static int __init policy_mt_init(void)
{
return xt_register_matches(policy_mt_reg, ARRAY_SIZE(policy_mt_reg));
}
static void __exit policy_mt_exit(void)
{
xt_unregister_matches(policy_mt_reg, ARRAY_SIZE(policy_mt_reg));
}
module_init(policy_mt_init);
module_exit(policy_mt_exit);
MODULE_ALIAS("ipt_policy");
MODULE_ALIAS("ip6t_policy");
| linux-master | net/netfilter/xt_policy.c |
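xt_addr_cmp() above treats two IPv4 addresses as equal under a mask when ((a1 ^ a2) & m) == 0. A standalone demonstration of that masked comparison follows, assuming addresses and mask are all in network byte order as inet_addr() returns them; masked_match() is an illustrative helper.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Masked address comparison as in xt_addr_cmp() for NFPROTO_IPV4:
 * two addresses match if they agree on every bit the mask selects.
 */
static bool masked_match(uint32_t a1, uint32_t a2, uint32_t mask)
{
	return ((a1 ^ a2) & mask) == 0;
}

int main(void)
{
	uint32_t a   = inet_addr("192.168.1.10");
	uint32_t b   = inet_addr("192.168.1.200");
	uint32_t m24 = inet_addr("255.255.255.0");
	uint32_t m32 = inet_addr("255.255.255.255");

	printf("/24 match: %d\n", masked_match(a, b, m24)); /* 1 */
	printf("/32 match: %d\n", masked_match(a, b, m32)); /* 0 */
	return 0;
}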
// SPDX-License-Identifier: GPL-2.0-only
/*
* Implements a dummy match to allow attaching comments to rules
*
* 2003-05-13 Brad Fisher ([email protected])
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_comment.h>
MODULE_AUTHOR("Brad Fisher <[email protected]>");
MODULE_DESCRIPTION("Xtables: No-op match which can be tagged with a comment");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_comment");
MODULE_ALIAS("ip6t_comment");
static bool
comment_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
/* We always match */
return true;
}
static struct xt_match comment_mt_reg __read_mostly = {
.name = "comment",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = comment_mt,
.matchsize = sizeof(struct xt_comment_info),
.me = THIS_MODULE,
};
static int __init comment_mt_init(void)
{
return xt_register_match(&comment_mt_reg);
}
static void __exit comment_mt_exit(void)
{
xt_unregister_match(&comment_mt_reg);
}
module_init(comment_mt_init);
module_exit(comment_mt_exit);
| linux-master | net/netfilter/xt_comment.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Amanda extension for TCP NAT alteration.
* (C) 2002 by Brian J. Murrell <[email protected]>
* based on a copy of HW's ip_nat_irc.c as well as other modules
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_nat_helper.h>
#include <linux/netfilter/nf_conntrack_amanda.h>
#define NAT_HELPER_NAME "amanda"
MODULE_AUTHOR("Brian J. Murrell <[email protected]>");
MODULE_DESCRIPTION("Amanda NAT helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_NAT_HELPER(NAT_HELPER_NAME);
static struct nf_conntrack_nat_helper nat_helper_amanda =
NF_CT_NAT_HELPER_INIT(NAT_HELPER_NAME);
static unsigned int help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp)
{
char buffer[sizeof("65535")];
u_int16_t port;
/* Connection comes from client. */
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->dir = IP_CT_DIR_ORIGINAL;
/* When you see the packet, we need to NAT it the same way as
* this one (i.e. same IP: it will be TCP and master is UDP). */
exp->expectfn = nf_nat_follow_master;
/* Try to get same port: if not, try to change it. */
port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port));
if (port == 0) {
nf_ct_helper_log(skb, exp->master, "all ports in use");
return NF_DROP;
}
sprintf(buffer, "%u", port);
if (!nf_nat_mangle_udp_packet(skb, exp->master, ctinfo,
protoff, matchoff, matchlen,
buffer, strlen(buffer))) {
nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
nf_ct_unexpect_related(exp);
return NF_DROP;
}
return NF_ACCEPT;
}
static void __exit nf_nat_amanda_fini(void)
{
nf_nat_helper_unregister(&nat_helper_amanda);
RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_amanda_init(void)
{
BUG_ON(nf_nat_amanda_hook != NULL);
nf_nat_helper_register(&nat_helper_amanda);
RCU_INIT_POINTER(nf_nat_amanda_hook, help);
return 0;
}
module_init(nf_nat_amanda_init);
module_exit(nf_nat_amanda_fini);
| linux-master | net/netfilter/nf_nat_amanda.c |
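help() above sizes its scratch buffer as char buffer[sizeof("65535")], i.e. just large enough for any 16-bit port in decimal plus the terminating NUL. A quick standalone check of that sizing trick:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* sizeof a string literal counts the terminating NUL, so this is 6:
	 * enough for any 16-bit port printed in decimal, as in help() above.
	 */
	char buffer[sizeof("65535")];
	unsigned int port = 65535;

	printf("buffer size = %zu\n", sizeof(buffer));	/* 6 */
	snprintf(buffer, sizeof(buffer), "%u", port);
	printf("\"%s\" uses %zu bytes + NUL\n", buffer, strlen(buffer));
	return 0;
}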
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Arturo Borrero Gonzalez <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_redirect.h>
#include <net/netfilter/nf_tables.h>
struct nft_redir {
u8 sreg_proto_min;
u8 sreg_proto_max;
u16 flags;
};
static const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
[NFTA_REDIR_REG_PROTO_MIN] = { .type = NLA_U32 },
[NFTA_REDIR_REG_PROTO_MAX] = { .type = NLA_U32 },
[NFTA_REDIR_FLAGS] =
NLA_POLICY_MASK(NLA_BE32, NF_NAT_RANGE_MASK),
};
static int nft_redir_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
int err;
err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
if (err < 0)
return err;
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT));
}
static int nft_redir_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_redir *priv = nft_expr_priv(expr);
unsigned int plen;
int err;
plen = sizeof_field(struct nf_nat_range, min_proto.all);
if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
&priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MAX],
&priv->sreg_proto_max,
plen);
if (err < 0)
return err;
} else {
priv->sreg_proto_max = priv->sreg_proto_min;
}
priv->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
if (tb[NFTA_REDIR_FLAGS])
priv->flags = ntohl(nla_get_be32(tb[NFTA_REDIR_FLAGS]));
return nf_ct_netns_get(ctx->net, ctx->family);
}
static int nft_redir_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_redir *priv = nft_expr_priv(expr);
if (priv->sreg_proto_min) {
if (nft_dump_register(skb, NFTA_REDIR_REG_PROTO_MIN,
priv->sreg_proto_min))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_REDIR_REG_PROTO_MAX,
priv->sreg_proto_max))
goto nla_put_failure;
}
if (priv->flags != 0 &&
nla_put_be32(skb, NFTA_REDIR_FLAGS, htonl(priv->flags)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static void nft_redir_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_redir *priv = nft_expr_priv(expr);
struct nf_nat_range2 range;
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
if (priv->sreg_proto_min) {
range.min_proto.all = (__force __be16)
nft_reg_load16(&regs->data[priv->sreg_proto_min]);
range.max_proto.all = (__force __be16)
nft_reg_load16(&regs->data[priv->sreg_proto_max]);
}
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &range,
nft_hook(pkt));
break;
#ifdef CONFIG_NF_TABLES_IPV6
case NFPROTO_IPV6:
regs->verdict.code = nf_nat_redirect_ipv6(pkt->skb, &range,
nft_hook(pkt));
break;
#endif
default:
WARN_ON_ONCE(1);
break;
}
}
static void
nft_redir_ipv4_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
nf_ct_netns_put(ctx->net, NFPROTO_IPV4);
}
static struct nft_expr_type nft_redir_ipv4_type;
static const struct nft_expr_ops nft_redir_ipv4_ops = {
.type = &nft_redir_ipv4_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
.eval = nft_redir_eval,
.init = nft_redir_init,
.destroy = nft_redir_ipv4_destroy,
.dump = nft_redir_dump,
.validate = nft_redir_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_redir_ipv4_type __read_mostly = {
.family = NFPROTO_IPV4,
.name = "redir",
.ops = &nft_redir_ipv4_ops,
.policy = nft_redir_policy,
.maxattr = NFTA_REDIR_MAX,
.owner = THIS_MODULE,
};
#ifdef CONFIG_NF_TABLES_IPV6
static void
nft_redir_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
nf_ct_netns_put(ctx->net, NFPROTO_IPV6);
}
static struct nft_expr_type nft_redir_ipv6_type;
static const struct nft_expr_ops nft_redir_ipv6_ops = {
.type = &nft_redir_ipv6_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
.eval = nft_redir_eval,
.init = nft_redir_init,
.destroy = nft_redir_ipv6_destroy,
.dump = nft_redir_dump,
.validate = nft_redir_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
.family = NFPROTO_IPV6,
.name = "redir",
.ops = &nft_redir_ipv6_ops,
.policy = nft_redir_policy,
.maxattr = NFTA_REDIR_MAX,
.owner = THIS_MODULE,
};
#endif
#ifdef CONFIG_NF_TABLES_INET
static void
nft_redir_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
nf_ct_netns_put(ctx->net, NFPROTO_INET);
}
static struct nft_expr_type nft_redir_inet_type;
static const struct nft_expr_ops nft_redir_inet_ops = {
.type = &nft_redir_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
.eval = nft_redir_eval,
.init = nft_redir_init,
.destroy = nft_redir_inet_destroy,
.dump = nft_redir_dump,
.validate = nft_redir_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_redir_inet_type __read_mostly = {
.family = NFPROTO_INET,
.name = "redir",
.ops = &nft_redir_inet_ops,
.policy = nft_redir_policy,
.maxattr = NFTA_REDIR_MAX,
.owner = THIS_MODULE,
};
static int __init nft_redir_module_init_inet(void)
{
return nft_register_expr(&nft_redir_inet_type);
}
#else
static inline int nft_redir_module_init_inet(void) { return 0; }
#endif
static int __init nft_redir_module_init(void)
{
int ret = nft_register_expr(&nft_redir_ipv4_type);
if (ret)
return ret;
#ifdef CONFIG_NF_TABLES_IPV6
ret = nft_register_expr(&nft_redir_ipv6_type);
if (ret) {
nft_unregister_expr(&nft_redir_ipv4_type);
return ret;
}
#endif
ret = nft_redir_module_init_inet();
if (ret < 0) {
nft_unregister_expr(&nft_redir_ipv4_type);
#ifdef CONFIG_NF_TABLES_IPV6
nft_unregister_expr(&nft_redir_ipv6_type);
#endif
return ret;
}
return ret;
}
static void __exit nft_redir_module_exit(void)
{
nft_unregister_expr(&nft_redir_ipv4_type);
#ifdef CONFIG_NF_TABLES_IPV6
nft_unregister_expr(&nft_redir_ipv6_type);
#endif
#ifdef CONFIG_NF_TABLES_INET
nft_unregister_expr(&nft_redir_inet_type);
#endif
}
module_init(nft_redir_module_init);
module_exit(nft_redir_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <[email protected]>");
MODULE_ALIAS_NFT_EXPR("redir");
MODULE_DESCRIPTION("Netfilter nftables redirect support");
| linux-master | net/netfilter/nft_redir.c |
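nft_redir_module_init() above registers one expression type per family and, when a later registration fails, unregisters the earlier ones before returning. A generic sketch of that register-then-unwind pattern follows; register_a/b/c and unregister_a/b are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>

/* Generic init/unwind pattern, as in nft_redir_module_init(): register
 * several facilities in order and, on failure, unregister the ones that
 * already succeeded in reverse order.
 */
static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; }	/* simulate a failure */
static void unregister_a(void) { puts("unregister a"); }
static void unregister_b(void) { puts("unregister b"); }

static int module_init_sketch(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_a;

	ret = register_c();
	if (ret)
		goto err_b;

	return 0;

err_b:
	unregister_b();
err_a:
	unregister_a();
	return ret;
}

int main(void)
{
	printf("init: %d\n", module_init_sketch());	/* -1, after unwinding */
	return 0;
}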
// SPDX-License-Identifier: GPL-2.0-only
/* iptables module to match on related connections */
/*
* (C) 2001 Martin Josefsson <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_helper.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Josefsson <[email protected]>");
MODULE_DESCRIPTION("Xtables: Related connection matching");
MODULE_ALIAS("ipt_helper");
MODULE_ALIAS("ip6t_helper");
static bool
helper_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_helper_info *info = par->matchinfo;
const struct nf_conn *ct;
const struct nf_conn_help *master_help;
const struct nf_conntrack_helper *helper;
enum ip_conntrack_info ctinfo;
bool ret = info->invert;
ct = nf_ct_get(skb, &ctinfo);
if (!ct || !ct->master)
return ret;
master_help = nfct_help(ct->master);
if (!master_help)
return ret;
/* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(master_help->helper);
if (!helper)
return ret;
if (info->name[0] == '\0')
ret = !ret;
else
ret ^= !strncmp(helper->name, info->name,
strlen(helper->name));
return ret;
}
static int helper_mt_check(const struct xt_mtchk_param *par)
{
struct xt_helper_info *info = par->matchinfo;
int ret;
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0) {
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
info->name[sizeof(info->name) - 1] = '\0';
return 0;
}
static void helper_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static struct xt_match helper_mt_reg __read_mostly = {
.name = "helper",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = helper_mt_check,
.match = helper_mt,
.destroy = helper_mt_destroy,
.matchsize = sizeof(struct xt_helper_info),
.me = THIS_MODULE,
};
static int __init helper_mt_init(void)
{
return xt_register_match(&helper_mt_reg);
}
static void __exit helper_mt_exit(void)
{
xt_unregister_match(&helper_mt_reg);
}
module_init(helper_mt_init);
module_exit(helper_mt_exit);
| linux-master | net/netfilter/xt_helper.c |
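helper_mt() above seeds its result with info->invert and XORs in the raw name comparison, so the plain and negated forms of the rule share one code path. A small standalone illustration of that idiom; match_name() is a made-up helper.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Invertible match in the style of helper_mt(): start from the invert flag
 * and XOR in the raw comparison, so "--helper foo" and "! --helper foo"
 * share the same code path.
 */
static bool match_name(const char *have, const char *want, bool invert)
{
	bool ret = invert;

	ret ^= (strncmp(have, want, strlen(have)) == 0);
	return ret;
}

int main(void)
{
	printf("%d\n", match_name("ftp", "ftp", false));	/* 1 */
	printf("%d\n", match_name("ftp", "ftp", true));		/* 0 */
	printf("%d\n", match_name("ftp", "irc", false));	/* 0 */
	printf("%d\n", match_name("ftp", "irc", true));		/* 1 */
	return 0;
}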
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
struct nft_bitmap_elem {
struct list_head head;
struct nft_set_ext ext;
};
/* This bitmap uses two bits to represent one element. These two bits determine
* the element state in the current and the future generation.
*
* An element can be in three states. The generation cursor is represented using
* the ^ character, note that this cursor shifts on every successful transaction.
* If no transaction is going on, we observe all elements are in the following
* state:
*
* 11 = this element is active in the current generation. In case of no updates,
* ^ it stays active in the next generation.
* 00 = this element is inactive in the current generation. In case of no
* ^ updates, it stays inactive in the next generation.
*
* On transaction handling, we observe these two temporary states:
*
* 01 = this element is inactive in the current generation and it becomes active
* ^ in the next one. This happens when the element is inserted but commit
path has not been executed yet, so activation is still pending. On
* transaction abortion, the element is removed.
* 10 = this element is active in the current generation and it becomes inactive
* ^ in the next one. This happens when the element is deactivated but commit
path has not been executed yet, so removal is still pending. On
transaction abortion, the next generation bit is reset to restore its
previous state.
*/
struct nft_bitmap {
struct list_head list;
u16 bitmap_size;
u8 bitmap[];
};
static inline void nft_bitmap_location(const struct nft_set *set,
const void *key,
u32 *idx, u32 *off)
{
u32 k;
if (set->klen == 2)
k = *(u16 *)key;
else
k = *(u8 *)key;
k <<= 1;
*idx = k / BITS_PER_BYTE;
*off = k % BITS_PER_BYTE;
}
/* Fetch the two bits that represent the element and check if it is active based
* on the generation mask.
*/
static inline bool
nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask)
{
return (bitmap[idx] & (0x3 << off)) & (genmask << off);
}
INDIRECT_CALLABLE_SCOPE
bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
const struct nft_bitmap *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
u32 idx, off;
nft_bitmap_location(set, key, &idx, &off);
return nft_bitmap_active(priv->bitmap, idx, off, genmask);
}
static struct nft_bitmap_elem *
nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
u8 genmask)
{
const struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be;
list_for_each_entry_rcu(be, &priv->list, head) {
if (memcmp(nft_set_ext_key(&be->ext),
nft_set_ext_key(&this->ext), set->klen) ||
!nft_set_elem_active(&be->ext, genmask))
continue;
return be;
}
return NULL;
}
static void *nft_bitmap_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
const struct nft_bitmap *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
struct nft_bitmap_elem *be;
list_for_each_entry_rcu(be, &priv->list, head) {
if (memcmp(nft_set_ext_key(&be->ext), elem->key.val.data, set->klen) ||
!nft_set_elem_active(&be->ext, genmask))
continue;
return be;
}
return ERR_PTR(-ENOENT);
}
static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **ext)
{
struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *new = elem->priv, *be;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
be = nft_bitmap_elem_find(set, new, genmask);
if (be) {
*ext = &be->ext;
return -EEXIST;
}
nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
/* Enter 01 state. */
priv->bitmap[idx] |= (genmask << off);
list_add_tail_rcu(&new->head, &priv->list);
return 0;
}
static void nft_bitmap_remove(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be = elem->priv;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 00 state. */
priv->bitmap[idx] &= ~(genmask << off);
list_del_rcu(&be->head);
}
static void nft_bitmap_activate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be = elem->priv;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 11 state. */
priv->bitmap[idx] |= (genmask << off);
nft_set_elem_change_active(net, set, &be->ext);
}
static bool nft_bitmap_flush(const struct net *net,
const struct nft_set *set, void *_be)
{
struct nft_bitmap *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
struct nft_bitmap_elem *be = _be;
u32 idx, off;
nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 10 state, similar to deactivation. */
priv->bitmap[idx] &= ~(genmask << off);
nft_set_elem_change_active(net, set, &be->ext);
return true;
}
static void *nft_bitmap_deactivate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *this = elem->priv, *be;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
nft_bitmap_location(set, elem->key.val.data, &idx, &off);
be = nft_bitmap_elem_find(set, this, genmask);
if (!be)
return NULL;
/* Enter 10 state. */
priv->bitmap[idx] &= ~(genmask << off);
nft_set_elem_change_active(net, set, &be->ext);
return be;
}
static void nft_bitmap_walk(const struct nft_ctx *ctx,
struct nft_set *set,
struct nft_set_iter *iter)
{
const struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be;
struct nft_set_elem elem;
list_for_each_entry_rcu(be, &priv->list, head) {
if (iter->count < iter->skip)
goto cont;
if (!nft_set_elem_active(&be->ext, iter->genmask))
goto cont;
elem.priv = be;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0)
return;
cont:
iter->count++;
}
}
/* The bitmap size is pow(2, key length in bits) / bits per byte. This is
* multiplied by two since each element takes two bits. For 8 bit keys, the
* bitmap consumes 66 bytes. For 16 bit keys, 16388 bytes.
*/
static inline u32 nft_bitmap_size(u32 klen)
{
return ((2 << ((klen * BITS_PER_BYTE) - 1)) / BITS_PER_BYTE) << 1;
}
static inline u64 nft_bitmap_total_size(u32 klen)
{
return sizeof(struct nft_bitmap) + nft_bitmap_size(klen);
}
static u64 nft_bitmap_privsize(const struct nlattr * const nla[],
const struct nft_set_desc *desc)
{
u32 klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
return nft_bitmap_total_size(klen);
}
static int nft_bitmap_init(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const nla[])
{
struct nft_bitmap *priv = nft_set_priv(set);
INIT_LIST_HEAD(&priv->list);
priv->bitmap_size = nft_bitmap_size(set->klen);
return 0;
}
static void nft_bitmap_destroy(const struct nft_ctx *ctx,
const struct nft_set *set)
{
struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be, *n;
list_for_each_entry_safe(be, n, &priv->list, head)
nf_tables_set_elem_destroy(ctx, set, be);
}
static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
/* Make sure we don't get bitmaps larger than 16 Kbytes. */
if (desc->klen > 2)
return false;
else if (desc->expr)
return false;
est->size = nft_bitmap_total_size(desc->klen);
est->lookup = NFT_SET_CLASS_O_1;
est->space = NFT_SET_CLASS_O_1;
return true;
}
const struct nft_set_type nft_set_bitmap_type = {
.ops = {
.privsize = nft_bitmap_privsize,
.elemsize = offsetof(struct nft_bitmap_elem, ext),
.estimate = nft_bitmap_estimate,
.init = nft_bitmap_init,
.destroy = nft_bitmap_destroy,
.insert = nft_bitmap_insert,
.remove = nft_bitmap_remove,
.deactivate = nft_bitmap_deactivate,
.flush = nft_bitmap_flush,
.activate = nft_bitmap_activate,
.lookup = nft_bitmap_lookup,
.walk = nft_bitmap_walk,
.get = nft_bitmap_get,
},
};
| linux-master | net/netfilter/nft_set_bitmap.c |
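Each element in this set type occupies two adjacent bits, so nft_bitmap_location() shifts the key left by one before splitting it into a byte index and a bit offset, and nft_bitmap_size() works out to 64 bytes of bitmap for 8-bit keys and 16384 for 16-bit keys (the 66 and 16388 figures in the comment add the struct header). The standalone check below reimplements the two helpers outside the kernel to verify that arithmetic.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* Same arithmetic as nft_bitmap_location(): two bits per element. */
static void bitmap_location(uint32_t key, uint32_t *idx, uint32_t *off)
{
	uint32_t k = key << 1;

	*idx = k / BITS_PER_BYTE;
	*off = k % BITS_PER_BYTE;
}

/* Same formula as nft_bitmap_size(). */
static uint32_t bitmap_size(uint32_t klen)
{
	return ((2u << ((klen * BITS_PER_BYTE) - 1)) / BITS_PER_BYTE) << 1;
}

int main(void)
{
	uint32_t idx, off;

	bitmap_location(0x2a, &idx, &off);
	printf("key 0x2a -> byte %u, bit offset %u\n", idx, off); /* 10, 4 */
	printf("klen 1 -> %u bytes\n", bitmap_size(1));	/* 64    */
	printf("klen 2 -> %u bytes\n", bitmap_size(2));	/* 16384 */
	return 0;
}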
// SPDX-License-Identifier: GPL-2.0-only
/*
* Connection tracking protocol helper module for SCTP.
*
* Copyright (c) 2004 Kiran Kumar Immidi <[email protected]>
* Copyright (c) 2004-2012 Patrick McHardy <[email protected]>
*
* SCTP is defined in RFC 2960. References to various sections in this code
* are to this RFC.
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/netfilter.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <net/sctp/checksum.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_timeout.h>
static const char *const sctp_conntrack_names[] = {
[SCTP_CONNTRACK_NONE] = "NONE",
[SCTP_CONNTRACK_CLOSED] = "CLOSED",
[SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT",
[SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED",
[SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED",
[SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT",
[SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD",
[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT",
[SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT",
};
#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
[SCTP_CONNTRACK_CLOSED] = 10 SECS,
[SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
[SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
[SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
[SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS,
[SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS,
[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
[SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
};
#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
#define sNO SCTP_CONNTRACK_NONE
#define sCL SCTP_CONNTRACK_CLOSED
#define sCW SCTP_CONNTRACK_COOKIE_WAIT
#define sCE SCTP_CONNTRACK_COOKIE_ECHOED
#define sES SCTP_CONNTRACK_ESTABLISHED
#define sSS SCTP_CONNTRACK_SHUTDOWN_SENT
#define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
#define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
#define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
#define sIV SCTP_CONNTRACK_MAX
/*
These are the descriptions of the states:
NOTE: These state names are tantalizingly similar to the states of an
SCTP endpoint. But the interpretation of the states is a little different,
considering that these are the states of the connection and not of an end
point. Please note the subtleties. -Kiran
NONE - Nothing so far.
COOKIE WAIT - We have seen an INIT chunk in the original direction, or also
an INIT_ACK chunk in the reply direction.
COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction.
ESTABLISHED - We have seen a COOKIE_ACK in the reply direction.
SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction.
SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply direction.
SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
to that of the SHUTDOWN chunk.
CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
the SHUTDOWN chunk. Connection is closed.
HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
*/
/* TODO
- I have assumed that the first INIT is in the original direction.
This messes things when an INIT comes in the reply direction in CLOSED
state.
- Check the error type in the reply dir before transitioning from
cookie echoed to closed.
- Sec 5.2.4 of RFC 2960
- Full Multi Homing support.
*/
/* SCTP conntrack state transitions */
static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
{
/* ORIGINAL */
/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW},
/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
},
{
/* REPLY */
/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
}
};
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
seq_printf(s, "%s ", sctp_conntrack_names[ct->proto.sctp.state]);
}
#endif
/* do_basic_checks ensures sch->length > 0, do not use before */
#define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \
for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0; \
(offset) < (skb)->len && \
((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))); \
(offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
/* Some validity checks to make sure the chunks are fine */
static int do_basic_checks(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
unsigned long *map,
const struct nf_hook_state *state)
{
u_int32_t offset, count;
struct sctp_chunkhdr _sch, *sch;
int flag;
flag = 0;
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
if (sch->type == SCTP_CID_INIT ||
sch->type == SCTP_CID_INIT_ACK ||
sch->type == SCTP_CID_SHUTDOWN_COMPLETE)
flag = 1;
/*
* Cookie Ack/Echo chunks not the first OR
* Init / Init Ack / Shutdown compl chunks not the only chunks
* OR zero-length.
*/
if (((sch->type == SCTP_CID_COOKIE_ACK ||
sch->type == SCTP_CID_COOKIE_ECHO ||
flag) &&
count != 0) || !sch->length) {
nf_ct_l4proto_log_invalid(skb, ct, state,
"%s failed. chunk num %d, type %d, len %d flag %d\n",
__func__, count, sch->type, sch->length, flag);
return 1;
}
if (map)
set_bit(sch->type, map);
}
return count == 0;
}
static int sctp_new_state(enum ip_conntrack_dir dir,
enum sctp_conntrack cur_state,
int chunk_type)
{
int i;
switch (chunk_type) {
case SCTP_CID_INIT:
i = 0;
break;
case SCTP_CID_INIT_ACK:
i = 1;
break;
case SCTP_CID_ABORT:
i = 2;
break;
case SCTP_CID_SHUTDOWN:
i = 3;
break;
case SCTP_CID_SHUTDOWN_ACK:
i = 4;
break;
case SCTP_CID_ERROR:
i = 5;
break;
case SCTP_CID_COOKIE_ECHO:
i = 6;
break;
case SCTP_CID_COOKIE_ACK:
i = 7;
break;
case SCTP_CID_SHUTDOWN_COMPLETE:
i = 8;
break;
case SCTP_CID_HEARTBEAT:
i = 9;
break;
case SCTP_CID_HEARTBEAT_ACK:
i = 10;
break;
default:
/* Other chunks like DATA or SACK do not change the state */
pr_debug("Unknown chunk type %d, Will stay in %s\n",
chunk_type, sctp_conntrack_names[cur_state]);
return cur_state;
}
return sctp_conntracks[dir][i][cur_state];
}
/* Don't need lock here: this conntrack not in circulation yet */
static noinline bool
sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
const struct sctphdr *sh, unsigned int dataoff)
{
enum sctp_conntrack new_state;
const struct sctp_chunkhdr *sch;
struct sctp_chunkhdr _sch;
u32 offset, count;
memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
new_state = SCTP_CONNTRACK_MAX;
for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) {
new_state = sctp_new_state(IP_CT_DIR_ORIGINAL,
SCTP_CONNTRACK_NONE, sch->type);
/* Invalid: delete conntrack */
if (new_state == SCTP_CONNTRACK_NONE ||
new_state == SCTP_CONNTRACK_MAX) {
pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
return false;
}
/* Copy the vtag into the state info */
if (sch->type == SCTP_CID_INIT) {
struct sctp_inithdr _inithdr, *ih;
/* Sec 8.5.1 (A) */
if (sh->vtag)
return false;
ih = skb_header_pointer(skb, offset + sizeof(_sch),
sizeof(_inithdr), &_inithdr);
if (!ih)
return false;
pr_debug("Setting vtag %x for new conn\n",
ih->init_tag);
ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
} else if (sch->type == SCTP_CID_HEARTBEAT) {
pr_debug("Setting vtag %x for secondary conntrack\n",
sh->vtag);
ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
} else {
/* If it is a shutdown ack OOTB packet, we expect a return
shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
pr_debug("Setting vtag %x for new conn OOTB\n",
sh->vtag);
ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
}
ct->proto.sctp.state = SCTP_CONNTRACK_NONE;
}
return true;
}
static bool sctp_error(struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state)
{
const struct sctphdr *sh;
const char *logmsg;
if (skb->len < dataoff + sizeof(struct sctphdr)) {
logmsg = "nf_ct_sctp: short packet ";
goto out_invalid;
}
if (state->hook == NF_INET_PRE_ROUTING &&
state->net->ct.sysctl_checksum &&
skb->ip_summed == CHECKSUM_NONE) {
if (skb_ensure_writable(skb, dataoff + sizeof(*sh))) {
logmsg = "nf_ct_sctp: failed to read header ";
goto out_invalid;
}
sh = (const struct sctphdr *)(skb->data + dataoff);
if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
logmsg = "nf_ct_sctp: bad CRC ";
goto out_invalid;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return false;
out_invalid:
nf_l4proto_log_invalid(skb, state, IPPROTO_SCTP, "%s", logmsg);
return true;
}
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
int nf_conntrack_sctp_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
enum sctp_conntrack new_state, old_state;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
const struct sctphdr *sh;
struct sctphdr _sctph;
const struct sctp_chunkhdr *sch;
struct sctp_chunkhdr _sch;
u_int32_t offset, count;
unsigned int *timeouts;
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
bool ignore = false;
if (sctp_error(skb, dataoff, state))
return -NF_ACCEPT;
sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
if (sh == NULL)
goto out;
if (do_basic_checks(ct, skb, dataoff, map, state) != 0)
goto out;
if (!nf_ct_is_confirmed(ct)) {
/* If an OOTB packet has any of these chunks discard (Sec 8.4) */
if (test_bit(SCTP_CID_ABORT, map) ||
test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
test_bit(SCTP_CID_COOKIE_ACK, map))
return -NF_ACCEPT;
if (!sctp_new(ct, skb, sh, dataoff))
return -NF_ACCEPT;
}
/* Check the verification tag (Sec 8.5) */
if (!test_bit(SCTP_CID_INIT, map) &&
!test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
!test_bit(SCTP_CID_COOKIE_ECHO, map) &&
!test_bit(SCTP_CID_ABORT, map) &&
!test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
!test_bit(SCTP_CID_HEARTBEAT, map) &&
!test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
sh->vtag != ct->proto.sctp.vtag[dir]) {
nf_ct_l4proto_log_invalid(skb, ct, state,
"verification tag check failed %x vs %x for dir %d",
sh->vtag, ct->proto.sctp.vtag[dir], dir);
goto out;
}
old_state = new_state = SCTP_CONNTRACK_NONE;
spin_lock_bh(&ct->lock);
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
/* Special cases of Verification tag check (Sec 8.5.1) */
if (sch->type == SCTP_CID_INIT) {
/* (A) vtag MUST be zero */
if (sh->vtag != 0)
goto out_unlock;
} else if (sch->type == SCTP_CID_ABORT) {
/* (B) vtag MUST match own vtag if T flag is unset OR
* MUST match peer's vtag if T flag is set
*/
if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
sh->vtag != ct->proto.sctp.vtag[dir]) ||
((sch->flags & SCTP_CHUNK_FLAG_T) &&
sh->vtag != ct->proto.sctp.vtag[!dir]))
goto out_unlock;
} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
/* (C) vtag MUST match own vtag if T flag is unset OR
* MUST match peer's vtag if T flag is set
*/
if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
sh->vtag != ct->proto.sctp.vtag[dir]) ||
((sch->flags & SCTP_CHUNK_FLAG_T) &&
sh->vtag != ct->proto.sctp.vtag[!dir]))
goto out_unlock;
} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
/* (D) vtag must be same as init_vtag as found in INIT_ACK */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
} else if (sch->type == SCTP_CID_HEARTBEAT) {
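/* The first HEARTBEAT seen in a direction primes that
 * direction's vtag.  A later mismatch is flagged and the
 * packet accepted without refreshing the timeout, so that a
 * HEARTBEAT_ACK from the other direction can take over the
 * new vtags (handled below).
 */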
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
ct->proto.sctp.vtag[dir] = sh->vtag;
} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
if (test_bit(SCTP_CID_DATA, map) || ignore)
goto out_unlock;
ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
ct->proto.sctp.last_dir = dir;
ignore = true;
continue;
} else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
}
} else if (sch->type == SCTP_CID_HEARTBEAT_ACK) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting vtag %x for dir %d\n",
sh->vtag, dir);
ct->proto.sctp.vtag[dir] = sh->vtag;
} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
if (test_bit(SCTP_CID_DATA, map) || ignore)
goto out_unlock;
if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 ||
ct->proto.sctp.last_dir == dir)
goto out_unlock;
ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
ct->proto.sctp.vtag[dir] = sh->vtag;
ct->proto.sctp.vtag[!dir] = 0;
} else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
}
}
old_state = ct->proto.sctp.state;
new_state = sctp_new_state(dir, old_state, sch->type);
/* Invalid */
if (new_state == SCTP_CONNTRACK_MAX) {
nf_ct_l4proto_log_invalid(skb, ct, state,
"Invalid, old_state %d, dir %d, type %d",
old_state, dir, sch->type);
goto out_unlock;
}
/* If it is an INIT or an INIT ACK note down the vtag */
if (sch->type == SCTP_CID_INIT ||
sch->type == SCTP_CID_INIT_ACK) {
struct sctp_inithdr _inithdr, *ih;
ih = skb_header_pointer(skb, offset + sizeof(_sch),
sizeof(_inithdr), &_inithdr);
if (ih == NULL)
goto out_unlock;
pr_debug("Setting vtag %x for dir %d\n",
ih->init_tag, !dir);
ct->proto.sctp.vtag[!dir] = ih->init_tag;
/* don't renew timeout on init retransmit so
* port reuse by client or NAT middlebox cannot
* keep entry alive indefinitely (incl. nat info).
*/
if (new_state == SCTP_CONNTRACK_CLOSED &&
old_state == SCTP_CONNTRACK_CLOSED &&
nf_ct_is_confirmed(ct))
ignore = true;
}
ct->proto.sctp.state = new_state;
if (old_state != new_state) {
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
}
}
spin_unlock_bh(&ct->lock);
/* allow but do not refresh timeout */
if (ignore)
return NF_ACCEPT;
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
out_unlock:
spin_unlock_bh(&ct->lock);
out:
return -NF_ACCEPT;
}
static bool sctp_can_early_drop(const struct nf_conn *ct)
{
switch (ct->proto.sctp.state) {
case SCTP_CONNTRACK_SHUTDOWN_SENT:
case SCTP_CONNTRACK_SHUTDOWN_RECD:
case SCTP_CONNTRACK_SHUTDOWN_ACK_SENT:
return true;
default:
break;
}
return false;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct, bool destroy)
{
struct nlattr *nest_parms;
spin_lock_bh(&ct->lock);
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state))
goto nla_put_failure;
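/* Destroy events report only the state; the verification tags are omitted. */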
if (destroy)
goto skip_state;
if (nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) ||
nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
ct->proto.sctp.vtag[IP_CT_DIR_REPLY]))
goto nla_put_failure;
skip_state:
spin_unlock_bh(&ct->lock);
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
spin_unlock_bh(&ct->lock);
return -1;
}
static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
[CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
[CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
};
#define SCTP_NLATTR_SIZE ( \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + 4) + \
NLA_ALIGN(NLA_HDRLEN + 4))
static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *attr = cda[CTA_PROTOINFO_SCTP];
struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1];
int err;
/* updates may not contain the internal protocol info, skip parsing */
if (!attr)
return 0;
err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_SCTP_MAX, attr,
sctp_nla_policy, NULL);
if (err < 0)
return err;
if (!tb[CTA_PROTOINFO_SCTP_STATE] ||
!tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] ||
!tb[CTA_PROTOINFO_SCTP_VTAG_REPLY])
return -EINVAL;
spin_lock_bh(&ct->lock);
ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);
ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] =
nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]);
ct->proto.sctp.vtag[IP_CT_DIR_REPLY] =
nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]);
spin_unlock_bh(&ct->lock);
return 0;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
struct nf_sctp_net *sn = nf_sctp_pernet(net);
int i;
if (!timeouts)
timeouts = sn->timeouts;
/* set default SCTP timeouts. */
for (i=0; i<SCTP_CONNTRACK_MAX; i++)
timeouts[i] = sn->timeouts[i];
/* there's a 1:1 mapping between attributes and protocol states. */
for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
if (tb[i]) {
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED];
return 0;
}
static int
sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
int i;
for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
[CTA_TIMEOUT_SCTP_CLOSED] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_COOKIE_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_COOKIE_ECHOED] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_ESTABLISHED] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_sctp_init_net(struct net *net)
{
struct nf_sctp_net *sn = nf_sctp_pernet(net);
int i;
for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
sn->timeouts[i] = sctp_timeouts[i];
/* timeouts[0] is unused, init it so ->timeouts[0] contains
* 'new' timeout, like udp or icmp.
*/
sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
.l4proto = IPPROTO_SCTP,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = sctp_print_conntrack,
#endif
.can_early_drop = sctp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_size = SCTP_NLATTR_SIZE,
.to_nlattr = sctp_to_nlattr,
.from_nlattr = nlattr_to_sctp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_SCTP_MAX,
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
| linux-master | net/netfilter/nf_conntrack_proto_sctp.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_tables.h>
#include <linux/if_vlan.h>
static unsigned int
nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct vlan_ethhdr *veth;
__be16 proto;
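/* Peel off a VLAN or PPPoE header to find the encapsulated L3 protocol
 * before dispatching to the IPv4 or IPv6 flowtable hook.
 */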
switch (skb->protocol) {
case htons(ETH_P_8021Q):
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
proto = veth->h_vlan_encapsulated_proto;
break;
case htons(ETH_P_PPP_SES):
proto = nf_flow_pppoe_proto(skb);
break;
default:
proto = skb->protocol;
break;
}
switch (proto) {
case htons(ETH_P_IP):
return nf_flow_offload_ip_hook(priv, skb, state);
case htons(ETH_P_IPV6):
return nf_flow_offload_ipv6_hook(priv, skb, state);
}
return NF_ACCEPT;
}
static int nf_flow_rule_route_inet(struct net *net,
struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
const struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
int err;
switch (flow_tuple->l3proto) {
case NFPROTO_IPV4:
err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule);
break;
case NFPROTO_IPV6:
err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule);
break;
default:
err = -1;
break;
}
return err;
}
static struct nf_flowtable_type flowtable_inet = {
.family = NFPROTO_INET,
.init = nf_flow_table_init,
.setup = nf_flow_table_offload_setup,
.action = nf_flow_rule_route_inet,
.free = nf_flow_table_free,
.hook = nf_flow_offload_inet_hook,
.owner = THIS_MODULE,
};
static struct nf_flowtable_type flowtable_ipv4 = {
.family = NFPROTO_IPV4,
.init = nf_flow_table_init,
.setup = nf_flow_table_offload_setup,
.action = nf_flow_rule_route_ipv4,
.free = nf_flow_table_free,
.hook = nf_flow_offload_ip_hook,
.owner = THIS_MODULE,
};
static struct nf_flowtable_type flowtable_ipv6 = {
.family = NFPROTO_IPV6,
.init = nf_flow_table_init,
.setup = nf_flow_table_offload_setup,
.action = nf_flow_rule_route_ipv6,
.free = nf_flow_table_free,
.hook = nf_flow_offload_ipv6_hook,
.owner = THIS_MODULE,
};
static int __init nf_flow_inet_module_init(void)
{
nft_register_flowtable_type(&flowtable_ipv4);
nft_register_flowtable_type(&flowtable_ipv6);
nft_register_flowtable_type(&flowtable_inet);
return 0;
}
static void __exit nf_flow_inet_module_exit(void)
{
nft_unregister_flowtable_type(&flowtable_inet);
nft_unregister_flowtable_type(&flowtable_ipv6);
nft_unregister_flowtable_type(&flowtable_ipv4);
}
module_init(nf_flow_inet_module_init);
module_exit(nf_flow_inet_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module");
| linux-master | net/netfilter/nf_flow_table_inet.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 Eric Leblond <[email protected]>
*
* Development of this code partly funded by OISF
* (http://www.openinfosecfoundation.org/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_queue.h>
static u32 jhash_initval __read_mostly;
struct nft_queue {
u8 sreg_qnum;
u16 queuenum;
u16 queues_total;
u16 flags;
};
static void nft_queue_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_queue *priv = nft_expr_priv(expr);
u32 queue = priv->queuenum;
u32 ret;
if (priv->queues_total > 1) {
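/* Spread packets across the configured queue range, either by
 * CPU id (cpu fanout) or by flow hash.
 */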
if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
int cpu = raw_smp_processor_id();
queue = priv->queuenum + cpu % priv->queues_total;
} else {
queue = nfqueue_hash(pkt->skb, queue,
priv->queues_total, nft_pf(pkt),
jhash_initval);
}
}
ret = NF_QUEUE_NR(queue);
if (priv->flags & NFT_QUEUE_FLAG_BYPASS)
ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
regs->verdict.code = ret;
}
static void nft_queue_sreg_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_queue *priv = nft_expr_priv(expr);
u32 queue, ret;
queue = regs->data[priv->sreg_qnum];
ret = NF_QUEUE_NR(queue);
if (priv->flags & NFT_QUEUE_FLAG_BYPASS)
ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
regs->verdict.code = ret;
}
static int nft_queue_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
static const unsigned int supported_hooks = ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING));
switch (ctx->family) {
case NFPROTO_IPV4:
case NFPROTO_IPV6:
case NFPROTO_INET:
case NFPROTO_BRIDGE:
break;
case NFPROTO_NETDEV: /* lacks okfn */
fallthrough;
default:
return -EOPNOTSUPP;
}
return nft_chain_validate_hooks(ctx->chain, supported_hooks);
}
static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
[NFTA_QUEUE_NUM] = { .type = NLA_U16 },
[NFTA_QUEUE_TOTAL] = { .type = NLA_U16 },
[NFTA_QUEUE_FLAGS] = { .type = NLA_U16 },
[NFTA_QUEUE_SREG_QNUM] = { .type = NLA_U32 },
};
static int nft_queue_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_queue *priv = nft_expr_priv(expr);
u32 maxid;
priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
if (tb[NFTA_QUEUE_TOTAL])
priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL]));
else
priv->queues_total = 1;
if (priv->queues_total == 0)
return -EINVAL;
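/* The highest queue number in the range must still fit into 16 bits. */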
maxid = priv->queues_total - 1 + priv->queuenum;
if (maxid > U16_MAX)
return -ERANGE;
if (tb[NFTA_QUEUE_FLAGS]) {
priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
if (priv->flags & ~NFT_QUEUE_FLAG_MASK)
return -EINVAL;
}
return 0;
}
static int nft_queue_sreg_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_queue *priv = nft_expr_priv(expr);
int err;
err = nft_parse_register_load(tb[NFTA_QUEUE_SREG_QNUM],
&priv->sreg_qnum, sizeof(u32));
if (err < 0)
return err;
if (tb[NFTA_QUEUE_FLAGS]) {
priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
if (priv->flags & ~NFT_QUEUE_FLAG_MASK)
return -EINVAL;
if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT)
return -EOPNOTSUPP;
}
return 0;
}
static int nft_queue_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_queue *priv = nft_expr_priv(expr);
if (nla_put_be16(skb, NFTA_QUEUE_NUM, htons(priv->queuenum)) ||
nla_put_be16(skb, NFTA_QUEUE_TOTAL, htons(priv->queues_total)) ||
nla_put_be16(skb, NFTA_QUEUE_FLAGS, htons(priv->flags)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int
nft_queue_sreg_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_queue *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_QUEUE_SREG_QNUM, priv->sreg_qnum) ||
nla_put_be16(skb, NFTA_QUEUE_FLAGS, htons(priv->flags)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static struct nft_expr_type nft_queue_type;
static const struct nft_expr_ops nft_queue_ops = {
.type = &nft_queue_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_queue)),
.eval = nft_queue_eval,
.init = nft_queue_init,
.dump = nft_queue_dump,
.validate = nft_queue_validate,
.reduce = NFT_REDUCE_READONLY,
};
static const struct nft_expr_ops nft_queue_sreg_ops = {
.type = &nft_queue_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_queue)),
.eval = nft_queue_sreg_eval,
.init = nft_queue_sreg_init,
.dump = nft_queue_sreg_dump,
.validate = nft_queue_validate,
.reduce = NFT_REDUCE_READONLY,
};
static const struct nft_expr_ops *
nft_queue_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_QUEUE_NUM] && tb[NFTA_QUEUE_SREG_QNUM])
return ERR_PTR(-EINVAL);
init_hashrandom(&jhash_initval);
if (tb[NFTA_QUEUE_NUM])
return &nft_queue_ops;
if (tb[NFTA_QUEUE_SREG_QNUM])
return &nft_queue_sreg_ops;
return ERR_PTR(-EINVAL);
}
static struct nft_expr_type nft_queue_type __read_mostly = {
.name = "queue",
.select_ops = nft_queue_select_ops,
.policy = nft_queue_policy,
.maxattr = NFTA_QUEUE_MAX,
.owner = THIS_MODULE,
};
static int __init nft_queue_module_init(void)
{
return nft_register_expr(&nft_queue_type);
}
static void __exit nft_queue_module_exit(void)
{
nft_unregister_expr(&nft_queue_type);
}
module_init(nft_queue_module_init);
module_exit(nft_queue_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Leblond <[email protected]>");
MODULE_ALIAS_NFT_EXPR("queue");
MODULE_DESCRIPTION("Netfilter nftables queue module");
| linux-master | net/netfilter/nft_queue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_dup_netdev.h>
struct nft_dup_netdev {
u8 sreg_dev;
};
static void nft_dup_netdev_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_dup_netdev *priv = nft_expr_priv(expr);
int oif = regs->data[priv->sreg_dev];
nf_dup_netdev_egress(pkt, oif);
}
static const struct nla_policy nft_dup_netdev_policy[NFTA_DUP_MAX + 1] = {
[NFTA_DUP_SREG_DEV] = { .type = NLA_U32 },
};
static int nft_dup_netdev_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_dup_netdev *priv = nft_expr_priv(expr);
if (tb[NFTA_DUP_SREG_DEV] == NULL)
return -EINVAL;
return nft_parse_register_load(tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev,
sizeof(int));
}
static int nft_dup_netdev_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_dup_netdev *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_dup_netdev *priv = nft_expr_priv(expr);
int oif = ctx->regs[priv->sreg_dev].data.data[0];
return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif);
}
static bool nft_dup_netdev_offload_action(const struct nft_expr *expr)
{
return true;
}
static struct nft_expr_type nft_dup_netdev_type;
static const struct nft_expr_ops nft_dup_netdev_ops = {
.type = &nft_dup_netdev_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_dup_netdev)),
.eval = nft_dup_netdev_eval,
.init = nft_dup_netdev_init,
.dump = nft_dup_netdev_dump,
.reduce = NFT_REDUCE_READONLY,
.offload = nft_dup_netdev_offload,
.offload_action = nft_dup_netdev_offload_action,
};
static struct nft_expr_type nft_dup_netdev_type __read_mostly = {
.family = NFPROTO_NETDEV,
.name = "dup",
.ops = &nft_dup_netdev_ops,
.policy = nft_dup_netdev_policy,
.maxattr = NFTA_DUP_MAX,
.owner = THIS_MODULE,
};
static int __init nft_dup_netdev_module_init(void)
{
return nft_register_expr(&nft_dup_netdev_type);
}
static void __exit nft_dup_netdev_module_exit(void)
{
nft_unregister_expr(&nft_dup_netdev_type);
}
module_init(nft_dup_netdev_module_init);
module_exit(nft_dup_netdev_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(5, "dup");
MODULE_DESCRIPTION("nftables netdev packet duplication support");
| linux-master | net/netfilter/nft_dup_netdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
struct nft_bitwise {
u8 sreg;
u8 dreg;
enum nft_bitwise_ops op:8;
u8 len;
struct nft_data mask;
struct nft_data xor;
struct nft_data data;
};
static void nft_bitwise_eval_bool(u32 *dst, const u32 *src,
const struct nft_bitwise *priv)
{
unsigned int i;
for (i = 0; i < DIV_ROUND_UP(priv->len, sizeof(u32)); i++)
dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i];
}
static void nft_bitwise_eval_lshift(u32 *dst, const u32 *src,
const struct nft_bitwise *priv)
{
u32 shift = priv->data.data[0];
unsigned int i;
u32 carry = 0;
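/* Shift word by word from the highest-indexed word down; bits shifted
 * out of each word are carried into the next lower-indexed word.
 */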
for (i = DIV_ROUND_UP(priv->len, sizeof(u32)); i > 0; i--) {
dst[i - 1] = (src[i - 1] << shift) | carry;
carry = src[i - 1] >> (BITS_PER_TYPE(u32) - shift);
}
}
static void nft_bitwise_eval_rshift(u32 *dst, const u32 *src,
const struct nft_bitwise *priv)
{
u32 shift = priv->data.data[0];
unsigned int i;
u32 carry = 0;
for (i = 0; i < DIV_ROUND_UP(priv->len, sizeof(u32)); i++) {
dst[i] = carry | (src[i] >> shift);
carry = src[i] << (BITS_PER_TYPE(u32) - shift);
}
}
void nft_bitwise_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
const u32 *src = &regs->data[priv->sreg];
u32 *dst = &regs->data[priv->dreg];
switch (priv->op) {
case NFT_BITWISE_BOOL:
nft_bitwise_eval_bool(dst, src, priv);
break;
case NFT_BITWISE_LSHIFT:
nft_bitwise_eval_lshift(dst, src, priv);
break;
case NFT_BITWISE_RSHIFT:
nft_bitwise_eval_rshift(dst, src, priv);
break;
}
}
static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
[NFTA_BITWISE_SREG] = { .type = NLA_U32 },
[NFTA_BITWISE_DREG] = { .type = NLA_U32 },
[NFTA_BITWISE_LEN] = { .type = NLA_U32 },
[NFTA_BITWISE_MASK] = { .type = NLA_NESTED },
[NFTA_BITWISE_XOR] = { .type = NLA_NESTED },
[NFTA_BITWISE_OP] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_BITWISE_DATA] = { .type = NLA_NESTED },
};
static int nft_bitwise_init_bool(struct nft_bitwise *priv,
const struct nlattr *const tb[])
{
struct nft_data_desc mask = {
.type = NFT_DATA_VALUE,
.size = sizeof(priv->mask),
.len = priv->len,
};
struct nft_data_desc xor = {
.type = NFT_DATA_VALUE,
.size = sizeof(priv->xor),
.len = priv->len,
};
int err;
if (tb[NFTA_BITWISE_DATA])
return -EINVAL;
if (!tb[NFTA_BITWISE_MASK] ||
!tb[NFTA_BITWISE_XOR])
return -EINVAL;
err = nft_data_init(NULL, &priv->mask, &mask, tb[NFTA_BITWISE_MASK]);
if (err < 0)
return err;
err = nft_data_init(NULL, &priv->xor, &xor, tb[NFTA_BITWISE_XOR]);
if (err < 0)
goto err_xor_err;
return 0;
err_xor_err:
nft_data_release(&priv->mask, mask.type);
return err;
}
static int nft_bitwise_init_shift(struct nft_bitwise *priv,
const struct nlattr *const tb[])
{
struct nft_data_desc desc = {
.type = NFT_DATA_VALUE,
.size = sizeof(priv->data),
.len = sizeof(u32),
};
int err;
if (tb[NFTA_BITWISE_MASK] ||
tb[NFTA_BITWISE_XOR])
return -EINVAL;
if (!tb[NFTA_BITWISE_DATA])
return -EINVAL;
err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_BITWISE_DATA]);
if (err < 0)
return err;
if (priv->data.data[0] >= BITS_PER_TYPE(u32)) {
nft_data_release(&priv->data, desc.type);
return -EINVAL;
}
return 0;
}
static int nft_bitwise_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_bitwise *priv = nft_expr_priv(expr);
u32 len;
int err;
err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
if (err < 0)
return err;
priv->len = len;
err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg,
priv->len);
if (err < 0)
return err;
err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG],
&priv->dreg, NULL, NFT_DATA_VALUE,
priv->len);
if (err < 0)
return err;
if (tb[NFTA_BITWISE_OP]) {
priv->op = ntohl(nla_get_be32(tb[NFTA_BITWISE_OP]));
switch (priv->op) {
case NFT_BITWISE_BOOL:
case NFT_BITWISE_LSHIFT:
case NFT_BITWISE_RSHIFT:
break;
default:
return -EOPNOTSUPP;
}
} else {
priv->op = NFT_BITWISE_BOOL;
}
switch(priv->op) {
case NFT_BITWISE_BOOL:
err = nft_bitwise_init_bool(priv, tb);
break;
case NFT_BITWISE_LSHIFT:
case NFT_BITWISE_RSHIFT:
err = nft_bitwise_init_shift(priv, tb);
break;
}
return err;
}
static int nft_bitwise_dump_bool(struct sk_buff *skb,
const struct nft_bitwise *priv)
{
if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
NFT_DATA_VALUE, priv->len) < 0)
return -1;
if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor,
NFT_DATA_VALUE, priv->len) < 0)
return -1;
return 0;
}
static int nft_bitwise_dump_shift(struct sk_buff *skb,
const struct nft_bitwise *priv)
{
if (nft_data_dump(skb, NFTA_BITWISE_DATA, &priv->data,
NFT_DATA_VALUE, sizeof(u32)) < 0)
return -1;
return 0;
}
static int nft_bitwise_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
int err = 0;
if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
return -1;
if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
return -1;
if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
return -1;
if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(priv->op)))
return -1;
switch (priv->op) {
case NFT_BITWISE_BOOL:
err = nft_bitwise_dump_bool(skb, priv);
break;
case NFT_BITWISE_LSHIFT:
case NFT_BITWISE_RSHIFT:
err = nft_bitwise_dump_shift(skb, priv);
break;
}
return err;
}
static struct nft_data zero;
static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
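/* Only the boolean form can be offloaded: XOR must be zero, the
 * operation must be in place (sreg == dreg) and cover the whole tracked
 * register; the mask is then folded into the flow match.
 */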
if (priv->op != NFT_BITWISE_BOOL)
return -EOPNOTSUPP;
if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) ||
priv->sreg != priv->dreg || priv->len != reg->len)
return -EOPNOTSUPP;
memcpy(&reg->mask, &priv->mask, sizeof(priv->mask));
return 0;
}
static bool nft_bitwise_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
const struct nft_bitwise *bitwise;
unsigned int regcount;
u8 dreg;
int i;
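/* If the destination register already holds the result of an identical
 * bitwise operation on the same tracked source, this expression is
 * redundant and can be elided; otherwise update or cancel the register
 * tracking state.
 */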
if (!track->regs[priv->sreg].selector)
return false;
bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
track->regs[priv->sreg].num_reg == 0 &&
track->regs[priv->dreg].bitwise &&
track->regs[priv->dreg].bitwise->ops == expr->ops &&
priv->sreg == bitwise->sreg &&
priv->dreg == bitwise->dreg &&
priv->op == bitwise->op &&
priv->len == bitwise->len &&
!memcmp(&priv->mask, &bitwise->mask, sizeof(priv->mask)) &&
!memcmp(&priv->xor, &bitwise->xor, sizeof(priv->xor)) &&
!memcmp(&priv->data, &bitwise->data, sizeof(priv->data))) {
track->cur = expr;
return true;
}
if (track->regs[priv->sreg].bitwise ||
track->regs[priv->sreg].num_reg != 0) {
nft_reg_track_cancel(track, priv->dreg, priv->len);
return false;
}
if (priv->sreg != priv->dreg) {
nft_reg_track_update(track, track->regs[priv->sreg].selector,
priv->dreg, priv->len);
}
dreg = priv->dreg;
regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE);
for (i = 0; i < regcount; i++, dreg++)
track->regs[dreg].bitwise = expr;
return false;
}
static const struct nft_expr_ops nft_bitwise_ops = {
.type = &nft_bitwise_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_bitwise)),
.eval = nft_bitwise_eval,
.init = nft_bitwise_init,
.dump = nft_bitwise_dump,
.reduce = nft_bitwise_reduce,
.offload = nft_bitwise_offload,
};
static int
nft_bitwise_extract_u32_data(const struct nlattr * const tb, u32 *out)
{
struct nft_data data;
struct nft_data_desc desc = {
.type = NFT_DATA_VALUE,
.size = sizeof(data),
.len = sizeof(u32),
};
int err;
err = nft_data_init(NULL, &data, &desc, tb);
if (err < 0)
return err;
*out = data.data[0];
return 0;
}
static int nft_bitwise_fast_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
int err;
err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg,
sizeof(u32));
if (err < 0)
return err;
err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, sizeof(u32));
if (err < 0)
return err;
if (tb[NFTA_BITWISE_DATA])
return -EINVAL;
if (!tb[NFTA_BITWISE_MASK] ||
!tb[NFTA_BITWISE_XOR])
return -EINVAL;
err = nft_bitwise_extract_u32_data(tb[NFTA_BITWISE_MASK], &priv->mask);
if (err < 0)
return err;
err = nft_bitwise_extract_u32_data(tb[NFTA_BITWISE_XOR], &priv->xor);
if (err < 0)
return err;
return 0;
}
static int
nft_bitwise_fast_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
struct nft_data data;
if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
return -1;
if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
return -1;
if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(sizeof(u32))))
return -1;
if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(NFT_BITWISE_BOOL)))
return -1;
data.data[0] = priv->mask;
if (nft_data_dump(skb, NFTA_BITWISE_MASK, &data,
NFT_DATA_VALUE, sizeof(u32)) < 0)
return -1;
data.data[0] = priv->xor;
if (nft_data_dump(skb, NFTA_BITWISE_XOR, &data,
NFT_DATA_VALUE, sizeof(u32)) < 0)
return -1;
return 0;
}
static int nft_bitwise_fast_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
if (priv->xor || priv->sreg != priv->dreg || reg->len != sizeof(u32))
return -EOPNOTSUPP;
reg->mask.data[0] = priv->mask;
return 0;
}
static bool nft_bitwise_fast_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
const struct nft_bitwise_fast_expr *bitwise;
if (!track->regs[priv->sreg].selector)
return false;
bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
track->regs[priv->dreg].bitwise &&
track->regs[priv->dreg].bitwise->ops == expr->ops &&
priv->sreg == bitwise->sreg &&
priv->dreg == bitwise->dreg &&
priv->mask == bitwise->mask &&
priv->xor == bitwise->xor) {
track->cur = expr;
return true;
}
if (track->regs[priv->sreg].bitwise) {
nft_reg_track_cancel(track, priv->dreg, NFT_REG32_SIZE);
return false;
}
if (priv->sreg != priv->dreg) {
track->regs[priv->dreg].selector =
track->regs[priv->sreg].selector;
}
track->regs[priv->dreg].bitwise = expr;
return false;
}
const struct nft_expr_ops nft_bitwise_fast_ops = {
.type = &nft_bitwise_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_bitwise_fast_expr)),
.eval = NULL, /* inlined */
.init = nft_bitwise_fast_init,
.dump = nft_bitwise_fast_dump,
.reduce = nft_bitwise_fast_reduce,
.offload = nft_bitwise_fast_offload,
};
static const struct nft_expr_ops *
nft_bitwise_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
int err;
u32 len;
if (!tb[NFTA_BITWISE_LEN] ||
!tb[NFTA_BITWISE_SREG] ||
!tb[NFTA_BITWISE_DREG])
return ERR_PTR(-EINVAL);
err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
if (err < 0)
return ERR_PTR(err);
if (len != sizeof(u32))
return &nft_bitwise_ops;
if (tb[NFTA_BITWISE_OP] &&
ntohl(nla_get_be32(tb[NFTA_BITWISE_OP])) != NFT_BITWISE_BOOL)
return &nft_bitwise_ops;
return &nft_bitwise_fast_ops;
}
struct nft_expr_type nft_bitwise_type __read_mostly = {
.name = "bitwise",
.select_ops = nft_bitwise_select_ops,
.policy = nft_bitwise_policy,
.maxattr = NFTA_BITWISE_MAX,
.owner = THIS_MODULE,
};
bool nft_expr_reduce_bitwise(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_expr *last = track->last;
const struct nft_expr *next;
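/* Peek at the next expression in the rule; if it is a bitwise
 * operation, let its reduce callback decide whether it can be elided
 * based on the tracked register state.
 */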
if (expr == last)
return false;
next = nft_expr_next(expr);
if (next->ops == &nft_bitwise_ops)
return nft_bitwise_reduce(track, next);
else if (next->ops == &nft_bitwise_fast_ops)
return nft_bitwise_fast_reduce(track, next);
return false;
}
EXPORT_SYMBOL_GPL(nft_expr_reduce_bitwise);
| linux-master | net/netfilter/nft_bitwise.c |
/* Kernel module to match connection tracking byte counter.
* GPL (C) 2002 Martin Devera ([email protected]).
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connbytes.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Xtables: Number of packets/bytes per connection matching");
MODULE_ALIAS("ipt_connbytes");
MODULE_ALIAS("ip6t_connbytes");
static bool
connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_connbytes_info *sinfo = par->matchinfo;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
u_int64_t what = 0; /* initialize to make gcc happy */
u_int64_t bytes = 0;
u_int64_t pkts = 0;
const struct nf_conn_acct *acct;
const struct nf_conn_counter *counters;
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
return false;
acct = nf_conn_acct_find(ct);
if (!acct)
return false;
counters = acct->counter;
switch (sinfo->what) {
case XT_CONNBYTES_PKTS:
switch (sinfo->direction) {
case XT_CONNBYTES_DIR_ORIGINAL:
what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
break;
case XT_CONNBYTES_DIR_REPLY:
what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
break;
case XT_CONNBYTES_DIR_BOTH:
what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
break;
}
break;
case XT_CONNBYTES_BYTES:
switch (sinfo->direction) {
case XT_CONNBYTES_DIR_ORIGINAL:
what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
break;
case XT_CONNBYTES_DIR_REPLY:
what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
break;
case XT_CONNBYTES_DIR_BOTH:
what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
break;
}
break;
case XT_CONNBYTES_AVGPKT:
switch (sinfo->direction) {
case XT_CONNBYTES_DIR_ORIGINAL:
bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
break;
case XT_CONNBYTES_DIR_REPLY:
bytes = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
pkts = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
break;
case XT_CONNBYTES_DIR_BOTH:
bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes) +
atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets) +
atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
break;
}
if (pkts != 0)
what = div64_u64(bytes, pkts);
break;
}
if (sinfo->count.to >= sinfo->count.from)
return what <= sinfo->count.to && what >= sinfo->count.from;
else /* inverted */
return what < sinfo->count.to || what > sinfo->count.from;
}
static int connbytes_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_connbytes_info *sinfo = par->matchinfo;
int ret;
if (sinfo->what != XT_CONNBYTES_PKTS &&
sinfo->what != XT_CONNBYTES_BYTES &&
sinfo->what != XT_CONNBYTES_AVGPKT)
return -EINVAL;
if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL &&
sinfo->direction != XT_CONNBYTES_DIR_REPLY &&
sinfo->direction != XT_CONNBYTES_DIR_BOTH)
return -EINVAL;
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
/*
* This filter cannot function correctly unless connection tracking
* accounting is enabled, so complain in the hope that someone notices.
*/
if (!nf_ct_acct_enabled(par->net)) {
pr_warn("Forcing CT accounting to be enabled\n");
nf_ct_set_acct(par->net, true);
}
return ret;
}
static void connbytes_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static struct xt_match connbytes_mt_reg __read_mostly = {
.name = "connbytes",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = connbytes_mt_check,
.match = connbytes_mt,
.destroy = connbytes_mt_destroy,
.matchsize = sizeof(struct xt_connbytes_info),
.me = THIS_MODULE,
};
static int __init connbytes_mt_init(void)
{
return xt_register_match(&connbytes_mt_reg);
}
static void __exit connbytes_mt_exit(void)
{
xt_unregister_match(&connbytes_mt_reg);
}
module_init(connbytes_mt_init);
module_exit(connbytes_mt_exit);
| linux-master | net/netfilter/xt_connbytes.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This is a module which is used for setting the skb->priority field
* of an skb for qdisc classification.
*/
/* (C) 2001-2002 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CLASSIFY.h>
#include <linux/netfilter_arp.h>
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: Qdisc classification");
MODULE_ALIAS("ipt_CLASSIFY");
MODULE_ALIAS("ip6t_CLASSIFY");
MODULE_ALIAS("arpt_CLASSIFY");
static unsigned int
classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_classify_target_info *clinfo = par->targinfo;
skb->priority = clinfo->priority;
return XT_CONTINUE;
}
static struct xt_target classify_tg_reg[] __read_mostly = {
{
.name = "CLASSIFY",
.revision = 0,
.family = NFPROTO_UNSPEC,
.hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
(1 << NF_INET_POST_ROUTING),
.target = classify_tg,
.targetsize = sizeof(struct xt_classify_target_info),
.me = THIS_MODULE,
},
{
.name = "CLASSIFY",
.revision = 0,
.family = NFPROTO_ARP,
.hooks = (1 << NF_ARP_OUT) | (1 << NF_ARP_FORWARD),
.target = classify_tg,
.targetsize = sizeof(struct xt_classify_target_info),
.me = THIS_MODULE,
},
};
static int __init classify_tg_init(void)
{
return xt_register_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
}
static void __exit classify_tg_exit(void)
{
xt_unregister_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
}
module_init(classify_tg_init);
module_exit(classify_tg_exit);
| linux-master | net/netfilter/xt_CLASSIFY.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
* Copyright (c) 2014 Intel Corporation
* Author: Tomasz Bursztyka <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nft_meta.h>
#include <net/netfilter/nf_tables_offload.h>
#include <uapi/linux/netfilter_bridge.h> /* NF_BR_PRE_ROUTING */
#define NFT_META_SECS_PER_MINUTE 60
#define NFT_META_SECS_PER_HOUR 3600
#define NFT_META_SECS_PER_DAY 86400
#define NFT_META_DAYS_PER_WEEK 7
static u8 nft_meta_weekday(void)
{
time64_t secs = ktime_get_real_seconds();
unsigned int dse;
u8 wday;
secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest;
dse = div_u64(secs, NFT_META_SECS_PER_DAY);
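/* 1970-01-01 (the epoch) was a Thursday, i.e. weekday 4. */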
wday = (4 + dse) % NFT_META_DAYS_PER_WEEK;
return wday;
}
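/* Returns the number of seconds elapsed since the start of the day,
 * not an hour count.
 */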
static u32 nft_meta_hour(time64_t secs)
{
struct tm tm;
time64_to_tm(secs, 0, &tm);
return tm.tm_hour * NFT_META_SECS_PER_HOUR
+ tm.tm_min * NFT_META_SECS_PER_MINUTE
+ tm.tm_sec;
}
static noinline_for_stack void
nft_meta_get_eval_time(enum nft_meta_keys key,
u32 *dest)
{
switch (key) {
case NFT_META_TIME_NS:
nft_reg_store64(dest, ktime_get_real_ns());
break;
case NFT_META_TIME_DAY:
nft_reg_store8(dest, nft_meta_weekday());
break;
case NFT_META_TIME_HOUR:
*dest = nft_meta_hour(ktime_get_real_seconds());
break;
default:
break;
}
}
static noinline bool
nft_meta_get_eval_pkttype_lo(const struct nft_pktinfo *pkt,
u32 *dest)
{
const struct sk_buff *skb = pkt->skb;
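/* Called for looped-back packets (skb->pkt_type == PACKET_LOOPBACK):
 * reconstruct multicast/broadcast from the network header instead.
 */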
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
nft_reg_store8(dest, PACKET_MULTICAST);
else
nft_reg_store8(dest, PACKET_BROADCAST);
break;
case NFPROTO_IPV6:
nft_reg_store8(dest, PACKET_MULTICAST);
break;
case NFPROTO_NETDEV:
switch (skb->protocol) {
case htons(ETH_P_IP): {
int noff = skb_network_offset(skb);
struct iphdr *iph, _iph;
iph = skb_header_pointer(skb, noff,
sizeof(_iph), &_iph);
if (!iph)
return false;
if (ipv4_is_multicast(iph->daddr))
nft_reg_store8(dest, PACKET_MULTICAST);
else
nft_reg_store8(dest, PACKET_BROADCAST);
break;
}
case htons(ETH_P_IPV6):
nft_reg_store8(dest, PACKET_MULTICAST);
break;
default:
WARN_ON_ONCE(1);
return false;
}
break;
default:
WARN_ON_ONCE(1);
return false;
}
return true;
}
static noinline bool
nft_meta_get_eval_skugid(enum nft_meta_keys key,
u32 *dest,
const struct nft_pktinfo *pkt)
{
struct sock *sk = skb_to_full_sk(pkt->skb);
struct socket *sock;
if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
return false;
read_lock_bh(&sk->sk_callback_lock);
sock = sk->sk_socket;
if (!sock || !sock->file) {
read_unlock_bh(&sk->sk_callback_lock);
return false;
}
switch (key) {
case NFT_META_SKUID:
*dest = from_kuid_munged(sock_net(sk)->user_ns,
sock->file->f_cred->fsuid);
break;
case NFT_META_SKGID:
*dest = from_kgid_munged(sock_net(sk)->user_ns,
sock->file->f_cred->fsgid);
break;
default:
break;
}
read_unlock_bh(&sk->sk_callback_lock);
return true;
}
#ifdef CONFIG_CGROUP_NET_CLASSID
static noinline bool
nft_meta_get_eval_cgroup(u32 *dest, const struct nft_pktinfo *pkt)
{
struct sock *sk = skb_to_full_sk(pkt->skb);
if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
return false;
*dest = sock_cgroup_classid(&sk->sk_cgrp_data);
return true;
}
#endif
static noinline bool nft_meta_get_eval_kind(enum nft_meta_keys key,
u32 *dest,
const struct nft_pktinfo *pkt)
{
const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
switch (key) {
case NFT_META_IIFKIND:
if (!in || !in->rtnl_link_ops)
return false;
strscpy_pad((char *)dest, in->rtnl_link_ops->kind, IFNAMSIZ);
break;
case NFT_META_OIFKIND:
if (!out || !out->rtnl_link_ops)
return false;
strscpy_pad((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ);
break;
default:
return false;
}
return true;
}
static void nft_meta_store_ifindex(u32 *dest, const struct net_device *dev)
{
*dest = dev ? dev->ifindex : 0;
}
static void nft_meta_store_ifname(u32 *dest, const struct net_device *dev)
{
strscpy_pad((char *)dest, dev ? dev->name : "", IFNAMSIZ);
}
static bool nft_meta_store_iftype(u32 *dest, const struct net_device *dev)
{
if (!dev)
return false;
nft_reg_store16(dest, dev->type);
return true;
}
static bool nft_meta_store_ifgroup(u32 *dest, const struct net_device *dev)
{
if (!dev)
return false;
*dest = dev->group;
return true;
}
static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest,
const struct nft_pktinfo *pkt)
{
switch (key) {
case NFT_META_IIFNAME:
nft_meta_store_ifname(dest, nft_in(pkt));
break;
case NFT_META_OIFNAME:
nft_meta_store_ifname(dest, nft_out(pkt));
break;
case NFT_META_IIF:
nft_meta_store_ifindex(dest, nft_in(pkt));
break;
case NFT_META_OIF:
nft_meta_store_ifindex(dest, nft_out(pkt));
break;
case NFT_META_IFTYPE:
if (!nft_meta_store_iftype(dest, pkt->skb->dev))
return false;
break;
case __NFT_META_IIFTYPE:
if (!nft_meta_store_iftype(dest, nft_in(pkt)))
return false;
break;
case NFT_META_OIFTYPE:
if (!nft_meta_store_iftype(dest, nft_out(pkt)))
return false;
break;
case NFT_META_IIFGROUP:
if (!nft_meta_store_ifgroup(dest, nft_in(pkt)))
return false;
break;
case NFT_META_OIFGROUP:
if (!nft_meta_store_ifgroup(dest, nft_out(pkt)))
return false;
break;
default:
return false;
}
return true;
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static noinline bool
nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest)
{
const struct dst_entry *dst = skb_dst(skb);
if (!dst)
return false;
*dest = dst->tclassid;
return true;
}
#endif
static noinline u32 nft_meta_get_eval_sdif(const struct nft_pktinfo *pkt)
{
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
return inet_sdif(pkt->skb);
case NFPROTO_IPV6:
return inet6_sdif(pkt->skb);
}
return 0;
}
static noinline void
nft_meta_get_eval_sdifname(u32 *dest, const struct nft_pktinfo *pkt)
{
u32 sdif = nft_meta_get_eval_sdif(pkt);
const struct net_device *dev;
dev = sdif ? dev_get_by_index_rcu(nft_net(pkt), sdif) : NULL;
nft_meta_store_ifname(dest, dev);
}
void nft_meta_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
u32 *dest = &regs->data[priv->dreg];
switch (priv->key) {
case NFT_META_LEN:
*dest = skb->len;
break;
case NFT_META_PROTOCOL:
nft_reg_store16(dest, (__force u16)skb->protocol);
break;
case NFT_META_NFPROTO:
nft_reg_store8(dest, nft_pf(pkt));
break;
case NFT_META_L4PROTO:
if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
goto err;
nft_reg_store8(dest, pkt->tprot);
break;
case NFT_META_PRIORITY:
*dest = skb->priority;
break;
case NFT_META_MARK:
*dest = skb->mark;
break;
case NFT_META_IIF:
case NFT_META_OIF:
case NFT_META_IIFNAME:
case NFT_META_OIFNAME:
case NFT_META_IIFTYPE:
case NFT_META_OIFTYPE:
case NFT_META_IIFGROUP:
case NFT_META_OIFGROUP:
if (!nft_meta_get_eval_ifname(priv->key, dest, pkt))
goto err;
break;
case NFT_META_SKUID:
case NFT_META_SKGID:
if (!nft_meta_get_eval_skugid(priv->key, dest, pkt))
goto err;
break;
#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_META_RTCLASSID:
if (!nft_meta_get_eval_rtclassid(skb, dest))
goto err;
break;
#endif
#ifdef CONFIG_NETWORK_SECMARK
case NFT_META_SECMARK:
*dest = skb->secmark;
break;
#endif
case NFT_META_PKTTYPE:
if (skb->pkt_type != PACKET_LOOPBACK) {
nft_reg_store8(dest, skb->pkt_type);
break;
}
if (!nft_meta_get_eval_pkttype_lo(pkt, dest))
goto err;
break;
case NFT_META_CPU:
*dest = raw_smp_processor_id();
break;
#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
if (!nft_meta_get_eval_cgroup(dest, pkt))
goto err;
break;
#endif
case NFT_META_PRANDOM:
*dest = get_random_u32();
break;
#ifdef CONFIG_XFRM
case NFT_META_SECPATH:
nft_reg_store8(dest, secpath_exists(skb));
break;
#endif
case NFT_META_IIFKIND:
case NFT_META_OIFKIND:
if (!nft_meta_get_eval_kind(priv->key, dest, pkt))
goto err;
break;
case NFT_META_TIME_NS:
case NFT_META_TIME_DAY:
case NFT_META_TIME_HOUR:
nft_meta_get_eval_time(priv->key, dest);
break;
case NFT_META_SDIF:
*dest = nft_meta_get_eval_sdif(pkt);
break;
case NFT_META_SDIFNAME:
nft_meta_get_eval_sdifname(dest, pkt);
break;
default:
WARN_ON(1);
goto err;
}
return;
err:
regs->verdict.code = NFT_BREAK;
}
EXPORT_SYMBOL_GPL(nft_meta_get_eval);
void nft_meta_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *meta = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
u32 *sreg = &regs->data[meta->sreg];
u32 value = *sreg;
u8 value8;
switch (meta->key) {
case NFT_META_MARK:
skb->mark = value;
break;
case NFT_META_PRIORITY:
skb->priority = value;
break;
case NFT_META_PKTTYPE:
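/* Only rewrite the packet type when both the current and the
 * requested value pass skb_pkt_type_ok().
 */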
value8 = nft_reg_load8(sreg);
if (skb->pkt_type != value8 &&
skb_pkt_type_ok(value8) &&
skb_pkt_type_ok(skb->pkt_type))
skb->pkt_type = value8;
break;
case NFT_META_NFTRACE:
value8 = nft_reg_load8(sreg);
skb->nf_trace = !!value8;
break;
#ifdef CONFIG_NETWORK_SECMARK
case NFT_META_SECMARK:
skb->secmark = value;
break;
#endif
default:
WARN_ON(1);
}
}
EXPORT_SYMBOL_GPL(nft_meta_set_eval);
const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
[NFTA_META_DREG] = { .type = NLA_U32 },
[NFTA_META_KEY] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_META_SREG] = { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(nft_meta_policy);
int nft_meta_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_PROTOCOL:
case NFT_META_IIFTYPE:
case NFT_META_OIFTYPE:
len = sizeof(u16);
break;
case NFT_META_NFPROTO:
case NFT_META_L4PROTO:
case NFT_META_LEN:
case NFT_META_PRIORITY:
case NFT_META_MARK:
case NFT_META_IIF:
case NFT_META_OIF:
case NFT_META_SDIF:
case NFT_META_SKUID:
case NFT_META_SKGID:
#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_META_RTCLASSID:
#endif
#ifdef CONFIG_NETWORK_SECMARK
case NFT_META_SECMARK:
#endif
case NFT_META_PKTTYPE:
case NFT_META_CPU:
case NFT_META_IIFGROUP:
case NFT_META_OIFGROUP:
#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
#endif
len = sizeof(u32);
break;
case NFT_META_IIFNAME:
case NFT_META_OIFNAME:
case NFT_META_IIFKIND:
case NFT_META_OIFKIND:
case NFT_META_SDIFNAME:
len = IFNAMSIZ;
break;
case NFT_META_PRANDOM:
len = sizeof(u32);
break;
#ifdef CONFIG_XFRM
case NFT_META_SECPATH:
len = sizeof(u8);
break;
#endif
case NFT_META_TIME_NS:
len = sizeof(u64);
break;
case NFT_META_TIME_DAY:
len = sizeof(u8);
break;
case NFT_META_TIME_HOUR:
len = sizeof(u32);
break;
default:
return -EOPNOTSUPP;
}
priv->len = len;
return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, len);
}
EXPORT_SYMBOL_GPL(nft_meta_get_init);
static int nft_meta_get_validate_sdif(const struct nft_ctx *ctx)
{
unsigned int hooks;
switch (ctx->family) {
case NFPROTO_IPV4:
case NFPROTO_IPV6:
case NFPROTO_INET:
hooks = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD);
break;
default:
return -EOPNOTSUPP;
}
return nft_chain_validate_hooks(ctx->chain, hooks);
}
static int nft_meta_get_validate_xfrm(const struct nft_ctx *ctx)
{
#ifdef CONFIG_XFRM
unsigned int hooks;
switch (ctx->family) {
case NFPROTO_NETDEV:
hooks = 1 << NF_NETDEV_INGRESS;
break;
case NFPROTO_IPV4:
case NFPROTO_IPV6:
case NFPROTO_INET:
hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD);
break;
default:
return -EOPNOTSUPP;
}
return nft_chain_validate_hooks(ctx->chain, hooks);
#else
return 0;
#endif
}
static int nft_meta_get_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
const struct nft_meta *priv = nft_expr_priv(expr);
switch (priv->key) {
case NFT_META_SECPATH:
return nft_meta_get_validate_xfrm(ctx);
case NFT_META_SDIF:
case NFT_META_SDIFNAME:
return nft_meta_get_validate_sdif(ctx);
default:
break;
}
return 0;
}
int nft_meta_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int hooks;
if (priv->key != NFT_META_PKTTYPE)
return 0;
switch (ctx->family) {
case NFPROTO_BRIDGE:
hooks = 1 << NF_BR_PRE_ROUTING;
break;
case NFPROTO_NETDEV:
hooks = 1 << NF_NETDEV_INGRESS;
break;
case NFPROTO_IPV4:
case NFPROTO_IPV6:
case NFPROTO_INET:
hooks = 1 << NF_INET_PRE_ROUTING;
break;
default:
return -EOPNOTSUPP;
}
return nft_chain_validate_hooks(ctx->chain, hooks);
}
EXPORT_SYMBOL_GPL(nft_meta_set_validate);
int nft_meta_set_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
int err;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_MARK:
case NFT_META_PRIORITY:
#ifdef CONFIG_NETWORK_SECMARK
case NFT_META_SECMARK:
#endif
len = sizeof(u32);
break;
case NFT_META_NFTRACE:
len = sizeof(u8);
break;
case NFT_META_PKTTYPE:
len = sizeof(u8);
break;
default:
return -EOPNOTSUPP;
}
priv->len = len;
err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
if (err < 0)
return err;
if (priv->key == NFT_META_NFTRACE)
static_branch_inc(&nft_trace_enabled);
return 0;
}
EXPORT_SYMBOL_GPL(nft_meta_set_init);
int nft_meta_get_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_meta *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_META_DREG, priv->dreg))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nft_meta_get_dump);
int nft_meta_set_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_meta *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_META_SREG, priv->sreg))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nft_meta_set_dump);
void nft_meta_set_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_meta *priv = nft_expr_priv(expr);
if (priv->key == NFT_META_NFTRACE)
static_branch_dec(&nft_trace_enabled);
}
EXPORT_SYMBOL_GPL(nft_meta_set_destroy);
static int nft_meta_get_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_meta *priv = nft_expr_priv(expr);
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
switch (priv->key) {
case NFT_META_PROTOCOL:
NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto,
sizeof(__u16), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
break;
case NFT_META_L4PROTO:
NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
sizeof(__u8), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
break;
case NFT_META_IIF:
NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,
ingress_ifindex, sizeof(__u32), reg);
break;
case NFT_META_IIFTYPE:
NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,
ingress_iftype, sizeof(__u16), reg);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
bool nft_meta_get_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct nft_meta *meta;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
meta = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->key != meta->key ||
priv->dreg != meta->dreg) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return nft_expr_reduce_bitwise(track, expr);
}
EXPORT_SYMBOL_GPL(nft_meta_get_reduce);
static const struct nft_expr_ops nft_meta_get_ops = {
.type = &nft_meta_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_get_eval,
.init = nft_meta_get_init,
.dump = nft_meta_get_dump,
.reduce = nft_meta_get_reduce,
.validate = nft_meta_get_validate,
.offload = nft_meta_get_offload,
};
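/* Storing to packet metadata may change what a later meta load would
* observe, so cancel register tracking for every register whose
* selector is a meta get expression.
*/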
static bool nft_meta_set_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
int i;
for (i = 0; i < NFT_REG32_NUM; i++) {
if (!track->regs[i].selector)
continue;
if (track->regs[i].selector->ops != &nft_meta_get_ops)
continue;
__nft_reg_track_cancel(track, i);
}
return false;
}
static const struct nft_expr_ops nft_meta_set_ops = {
.type = &nft_meta_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_set_eval,
.init = nft_meta_set_init,
.destroy = nft_meta_set_destroy,
.dump = nft_meta_set_dump,
.reduce = nft_meta_set_reduce,
.validate = nft_meta_set_validate,
};
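/* Choose the get or set ops depending on whether a destination or a
* source register was supplied; for the bridge family, return -EAGAIN
* so the bridge-specific meta module can be autoloaded instead.
*/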
static const struct nft_expr_ops *
nft_meta_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_META_KEY] == NULL)
return ERR_PTR(-EINVAL);
if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
return ERR_PTR(-EINVAL);
#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) && IS_MODULE(CONFIG_NFT_BRIDGE_META)
if (ctx->family == NFPROTO_BRIDGE)
return ERR_PTR(-EAGAIN);
#endif
if (tb[NFTA_META_DREG])
return &nft_meta_get_ops;
if (tb[NFTA_META_SREG])
return &nft_meta_set_ops;
return ERR_PTR(-EINVAL);
}
static int nft_meta_inner_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_PROTOCOL:
len = sizeof(u16);
break;
case NFT_META_L4PROTO:
len = sizeof(u32);
break;
default:
return -EOPNOTSUPP;
}
priv->len = len;
return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, len);
}
void nft_meta_inner_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *tun_ctx)
{
const struct nft_meta *priv = nft_expr_priv(expr);
u32 *dest = &regs->data[priv->dreg];
switch (priv->key) {
case NFT_META_PROTOCOL:
nft_reg_store16(dest, (__force u16)tun_ctx->llproto);
break;
case NFT_META_L4PROTO:
if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
goto err;
nft_reg_store8(dest, tun_ctx->l4proto);
break;
default:
WARN_ON_ONCE(1);
goto err;
}
return;
err:
regs->verdict.code = NFT_BREAK;
}
EXPORT_SYMBOL_GPL(nft_meta_inner_eval);
static const struct nft_expr_ops nft_meta_inner_ops = {
.type = &nft_meta_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.init = nft_meta_inner_init,
.dump = nft_meta_get_dump,
/* direct call to nft_meta_inner_eval(). */
};
struct nft_expr_type nft_meta_type __read_mostly = {
.name = "meta",
.select_ops = nft_meta_select_ops,
.inner_ops = &nft_meta_inner_ops,
.policy = nft_meta_policy,
.maxattr = NFTA_META_MAX,
.owner = THIS_MODULE,
};
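/* secmark object: resolves an LSM security context string to a secid
* once at object creation time and stamps it onto matching packets.
*/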
#ifdef CONFIG_NETWORK_SECMARK
struct nft_secmark {
u32 secid;
char *ctx;
};
static const struct nla_policy nft_secmark_policy[NFTA_SECMARK_MAX + 1] = {
[NFTA_SECMARK_CTX] = { .type = NLA_STRING, .len = NFT_SECMARK_CTX_MAXLEN },
};
static int nft_secmark_compute_secid(struct nft_secmark *priv)
{
u32 tmp_secid = 0;
int err;
err = security_secctx_to_secid(priv->ctx, strlen(priv->ctx), &tmp_secid);
if (err)
return err;
if (!tmp_secid)
return -ENOENT;
err = security_secmark_relabel_packet(tmp_secid);
if (err)
return err;
priv->secid = tmp_secid;
return 0;
}
static void nft_secmark_obj_eval(struct nft_object *obj, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_secmark *priv = nft_obj_data(obj);
struct sk_buff *skb = pkt->skb;
skb->secmark = priv->secid;
}
static int nft_secmark_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_secmark *priv = nft_obj_data(obj);
int err;
if (tb[NFTA_SECMARK_CTX] == NULL)
return -EINVAL;
priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL);
if (!priv->ctx)
return -ENOMEM;
err = nft_secmark_compute_secid(priv);
if (err) {
kfree(priv->ctx);
return err;
}
security_secmark_refcount_inc();
return 0;
}
static int nft_secmark_obj_dump(struct sk_buff *skb, struct nft_object *obj,
bool reset)
{
struct nft_secmark *priv = nft_obj_data(obj);
int err;
if (nla_put_string(skb, NFTA_SECMARK_CTX, priv->ctx))
return -1;
if (reset) {
err = nft_secmark_compute_secid(priv);
if (err)
return err;
}
return 0;
}
static void nft_secmark_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
{
struct nft_secmark *priv = nft_obj_data(obj);
security_secmark_refcount_dec();
kfree(priv->ctx);
}
static const struct nft_object_ops nft_secmark_obj_ops = {
.type = &nft_secmark_obj_type,
.size = sizeof(struct nft_secmark),
.init = nft_secmark_obj_init,
.eval = nft_secmark_obj_eval,
.dump = nft_secmark_obj_dump,
.destroy = nft_secmark_obj_destroy,
};
struct nft_object_type nft_secmark_obj_type __read_mostly = {
.type = NFT_OBJECT_SECMARK,
.ops = &nft_secmark_obj_ops,
.maxattr = NFTA_SECMARK_MAX,
.policy = nft_secmark_policy,
.owner = THIS_MODULE,
};
#endif /* CONFIG_NETWORK_SECMARK */
| linux-master | net/netfilter/nft_meta.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Structure dynamic extension infrastructure
* Copyright (C) 2004 Rusty Russell IBM Corporation
* Copyright (C) 2007 Netfilter Core Team <[email protected]>
* Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
*/
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <net/netfilter/nf_nat.h>
#define NF_CT_EXT_PREALLOC 128u /* conntrack events are on by default */
atomic_t nf_conntrack_ext_genid __read_mostly = ATOMIC_INIT(1);
static const u8 nf_ct_ext_type_len[NF_CT_EXT_NUM] = {
[NF_CT_EXT_HELPER] = sizeof(struct nf_conn_help),
#if IS_ENABLED(CONFIG_NF_NAT)
[NF_CT_EXT_NAT] = sizeof(struct nf_conn_nat),
#endif
[NF_CT_EXT_SEQADJ] = sizeof(struct nf_conn_seqadj),
[NF_CT_EXT_ACCT] = sizeof(struct nf_conn_acct),
#ifdef CONFIG_NF_CONNTRACK_EVENTS
[NF_CT_EXT_ECACHE] = sizeof(struct nf_conntrack_ecache),
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
[NF_CT_EXT_TSTAMP] = sizeof(struct nf_conn_tstamp),
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
[NF_CT_EXT_TIMEOUT] = sizeof(struct nf_conn_timeout),
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
[NF_CT_EXT_LABELS] = sizeof(struct nf_conn_labels),
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
[NF_CT_EXT_SYNPROXY] = sizeof(struct nf_conn_synproxy),
#endif
#if IS_ENABLED(CONFIG_NET_ACT_CT)
[NF_CT_EXT_ACT_CT] = sizeof(struct nf_conn_act_ct_ext),
#endif
};
static __always_inline unsigned int total_extension_size(void)
{
/* remember to add new extensions below */
BUILD_BUG_ON(NF_CT_EXT_NUM > 10);
return sizeof(struct nf_ct_ext) +
sizeof(struct nf_conn_help)
#if IS_ENABLED(CONFIG_NF_NAT)
+ sizeof(struct nf_conn_nat)
#endif
+ sizeof(struct nf_conn_seqadj)
+ sizeof(struct nf_conn_acct)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ sizeof(struct nf_conntrack_ecache)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ sizeof(struct nf_conn_tstamp)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ sizeof(struct nf_conn_timeout)
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
+ sizeof(struct nf_conn_labels)
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ sizeof(struct nf_conn_synproxy)
#endif
#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ sizeof(struct nf_conn_act_ct_ext)
#endif
;
}
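/* Append extension @id to an unconfirmed conntrack, growing the single
* krealloc()'d extension area and recording the new offset.
*/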
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
unsigned int newlen, newoff, oldlen, alloc;
struct nf_ct_ext *new;
/* Conntrack must not be confirmed to avoid races on reallocation. */
WARN_ON(nf_ct_is_confirmed(ct));
/* struct nf_ct_ext uses u8 to store offsets/size */
BUILD_BUG_ON(total_extension_size() > 255u);
if (ct->ext) {
const struct nf_ct_ext *old = ct->ext;
if (__nf_ct_ext_exist(old, id))
return NULL;
oldlen = old->len;
} else {
oldlen = sizeof(*new);
}
newoff = ALIGN(oldlen, __alignof__(struct nf_ct_ext));
newlen = newoff + nf_ct_ext_type_len[id];
alloc = max(newlen, NF_CT_EXT_PREALLOC);
new = krealloc(ct->ext, alloc, gfp);
if (!new)
return NULL;
if (!ct->ext) {
memset(new->offset, 0, sizeof(new->offset));
new->gen_id = atomic_read(&nf_conntrack_ext_genid);
}
new->offset[id] = newoff;
new->len = newlen;
memset((void *)new + newoff, 0, newlen - newoff);
ct->ext = new;
return (void *)new + newoff;
}
EXPORT_SYMBOL(nf_ct_ext_add);
/* Use nf_ct_ext_find wrapper. This is only useful for unconfirmed entries. */
void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id)
{
unsigned int gen_id = atomic_read(&nf_conntrack_ext_genid);
unsigned int this_id = READ_ONCE(ext->gen_id);
if (!__nf_ct_ext_exist(ext, id))
return NULL;
if (this_id == 0 || ext->gen_id == gen_id)
return (void *)ext + ext->offset[id];
return NULL;
}
EXPORT_SYMBOL(__nf_ct_ext_find);
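/* Called when an extension-owning module goes away: bumping the genid
* makes __nf_ct_ext_find() ignore stale extension data, and the sleep
* lets packets that are currently being processed drain.
*/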
void nf_ct_ext_bump_genid(void)
{
unsigned int value = atomic_inc_return(&nf_conntrack_ext_genid);
if (value == UINT_MAX)
atomic_set(&nf_conntrack_ext_genid, 1);
msleep(HZ);
}
| linux-master | net/netfilter/nf_conntrack_extend.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* iptables module to match inet_addr_type() of an ip.
*
* Copyright (c) 2004 Patrick McHardy <[email protected]>
* (C) 2007 Laszlo Attila Toth <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <net/route.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#endif
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_addrtype.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("Xtables: address type match");
MODULE_ALIAS("ipt_addrtype");
MODULE_ALIAS("ip6t_addrtype");
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
const struct in6_addr *addr, u16 mask)
{
struct flowi6 flow;
struct rt6_info *rt;
u32 ret = 0;
int route_err;
memset(&flow, 0, sizeof(flow));
flow.daddr = *addr;
if (dev)
flow.flowi6_oif = dev->ifindex;
if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
if (nf_ipv6_chk_addr(net, addr, dev, true))
ret = XT_ADDRTYPE_LOCAL;
}
route_err = nf_ip6_route(net, (struct dst_entry **)&rt,
flowi6_to_flowi(&flow), false);
if (route_err)
return XT_ADDRTYPE_UNREACHABLE;
if (rt->rt6i_flags & RTF_REJECT)
ret = XT_ADDRTYPE_UNREACHABLE;
if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
ret |= XT_ADDRTYPE_LOCAL;
if (ipv6_anycast_destination((struct dst_entry *)rt, addr))
ret |= XT_ADDRTYPE_ANYCAST;
dst_release(&rt->dst);
return ret;
}
static bool match_type6(struct net *net, const struct net_device *dev,
const struct in6_addr *addr, u16 mask)
{
int addr_type = ipv6_addr_type(addr);
if ((mask & XT_ADDRTYPE_MULTICAST) &&
!(addr_type & IPV6_ADDR_MULTICAST))
return false;
if ((mask & XT_ADDRTYPE_UNICAST) && !(addr_type & IPV6_ADDR_UNICAST))
return false;
if ((mask & XT_ADDRTYPE_UNSPEC) && addr_type != IPV6_ADDR_ANY)
return false;
if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
XT_ADDRTYPE_UNREACHABLE) & mask)
return !!(mask & match_lookup_rt6(net, dev, addr, mask));
return true;
}
static bool
addrtype_mt6(struct net *net, const struct net_device *dev,
const struct sk_buff *skb, const struct xt_addrtype_info_v1 *info)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
bool ret = true;
if (info->source)
ret &= match_type6(net, dev, &iph->saddr, info->source) ^
(info->flags & XT_ADDRTYPE_INVERT_SOURCE);
if (ret && info->dest)
ret &= match_type6(net, dev, &iph->daddr, info->dest) ^
!!(info->flags & XT_ADDRTYPE_INVERT_DEST);
return ret;
}
#endif
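/* IPv4: inet_dev_addr_type() returns an RTN_* route type which is
* matched against the user-supplied bitmask.
*/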
static inline bool match_type(struct net *net, const struct net_device *dev,
__be32 addr, u_int16_t mask)
{
return !!(mask & (1 << inet_dev_addr_type(net, dev, addr)));
}
static bool
addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = xt_net(par);
const struct xt_addrtype_info *info = par->matchinfo;
const struct iphdr *iph = ip_hdr(skb);
bool ret = true;
if (info->source)
ret &= match_type(net, NULL, iph->saddr, info->source) ^
info->invert_source;
if (info->dest)
ret &= match_type(net, NULL, iph->daddr, info->dest) ^
info->invert_dest;
return ret;
}
static bool
addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = xt_net(par);
const struct xt_addrtype_info_v1 *info = par->matchinfo;
const struct iphdr *iph;
const struct net_device *dev = NULL;
bool ret = true;
if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN)
dev = xt_in(par);
else if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
dev = xt_out(par);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
if (xt_family(par) == NFPROTO_IPV6)
return addrtype_mt6(net, dev, skb, info);
#endif
iph = ip_hdr(skb);
if (info->source)
ret &= match_type(net, dev, iph->saddr, info->source) ^
(info->flags & XT_ADDRTYPE_INVERT_SOURCE);
if (ret && info->dest)
ret &= match_type(net, dev, iph->daddr, info->dest) ^
!!(info->flags & XT_ADDRTYPE_INVERT_DEST);
return ret;
}
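/* Revision 1 sanity checks: interface limiting must be compatible with
* the hooks the rule is attached to, and several address types cannot
* be matched for IPv6.
*/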
static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
{
const char *errmsg = "both incoming and outgoing interface limitation cannot be selected";
struct xt_addrtype_info_v1 *info = par->matchinfo;
if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
goto err;
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) &&
info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
errmsg = "output interface limitation not valid in PREROUTING and INPUT";
goto err;
}
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT)) &&
info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) {
errmsg = "input interface limitation not valid in POSTROUTING and OUTPUT";
goto err;
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
if (par->family == NFPROTO_IPV6) {
if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
errmsg = "ipv6 BLACKHOLE matching not supported";
goto err;
}
if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
errmsg = "ipv6 PROHIBIT (THROW, NAT ..) matching not supported";
goto err;
}
if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
errmsg = "ipv6 does not support BROADCAST matching";
goto err;
}
}
#endif
return 0;
err:
pr_info_ratelimited("%s\n", errmsg);
return -EINVAL;
}
static struct xt_match addrtype_mt_reg[] __read_mostly = {
{
.name = "addrtype",
.family = NFPROTO_IPV4,
.match = addrtype_mt_v0,
.matchsize = sizeof(struct xt_addrtype_info),
.me = THIS_MODULE
},
{
.name = "addrtype",
.family = NFPROTO_UNSPEC,
.revision = 1,
.match = addrtype_mt_v1,
.checkentry = addrtype_mt_checkentry_v1,
.matchsize = sizeof(struct xt_addrtype_info_v1),
.me = THIS_MODULE
}
};
static int __init addrtype_mt_init(void)
{
return xt_register_matches(addrtype_mt_reg,
ARRAY_SIZE(addrtype_mt_reg));
}
static void __exit addrtype_mt_exit(void)
{
xt_unregister_matches(addrtype_mt_reg, ARRAY_SIZE(addrtype_mt_reg));
}
module_init(addrtype_mt_init);
module_exit(addrtype_mt_exit);
| linux-master | net/netfilter/xt_addrtype.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* IRC extension for TCP NAT alteration.
*
* (C) 2000-2001 by Harald Welte <[email protected]>
* (C) 2004 Rusty Russell <[email protected]> IBM Corporation
* based on a copy of RR's ip_nat_ftp.c
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/tcp.h>
#include <linux/kernel.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <linux/netfilter/nf_conntrack_irc.h>
#define NAT_HELPER_NAME "irc"
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("IRC (DCC) NAT helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_NAT_HELPER(NAT_HELPER_NAME);
static struct nf_conntrack_nat_helper nat_helper_irc =
NF_CT_NAT_HELPER_INIT(NAT_HELPER_NAME);
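/* Pick a free port for the expected DCC data connection and mangle the
* IRC payload so the peer connects to the NATed address and port.
*/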
static unsigned int help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp)
{
char buffer[sizeof("4294967295 65535")];
struct nf_conn *ct = exp->master;
union nf_inet_addr newaddr;
u_int16_t port;
/* Reply comes from server. */
newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->dir = IP_CT_DIR_REPLY;
exp->expectfn = nf_nat_follow_master;
port = nf_nat_exp_find_port(exp,
ntohs(exp->saved_proto.tcp.port));
if (port == 0) {
nf_ct_helper_log(skb, ct, "all ports in use");
return NF_DROP;
}
/* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
* strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
* strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
* strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
* strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
*
* AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
* 255.255.255.255==4294967295, 10 digits)
* P: bound port (min 1 d, max 5d (65535))
* F: filename (min 1 d )
* S: size (min 1 d )
* 0x01, \n: terminators
*/
/* AAA = "us", ie. where server normally talks to. */
snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
pr_debug("inserting '%s' == %pI4, port %u\n",
buffer, &newaddr.ip, port);
if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
matchlen, buffer, strlen(buffer))) {
nf_ct_helper_log(skb, ct, "cannot mangle packet");
nf_ct_unexpect_related(exp);
return NF_DROP;
}
return NF_ACCEPT;
}
static void __exit nf_nat_irc_fini(void)
{
nf_nat_helper_unregister(&nat_helper_irc);
RCU_INIT_POINTER(nf_nat_irc_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_irc_init(void)
{
BUG_ON(nf_nat_irc_hook != NULL);
nf_nat_helper_register(&nat_helper_irc);
RCU_INIT_POINTER(nf_nat_irc_hook, help);
return 0;
}
/* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */
static int warn_set(const char *val, const struct kernel_param *kp)
{
pr_info("kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
return 0;
}
module_param_call(ports, warn_set, NULL, NULL, 0);
module_init(nf_nat_irc_init);
module_exit(nf_nat_irc_fini);
| linux-master | net/netfilter/nf_nat_irc.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#ifdef CONFIG_LWTUNNEL
#include <net/netfilter/nf_hooks_lwtunnel.h>
#endif
#include <linux/rculist_nulls.h>
static bool enable_hooks __read_mostly;
MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks");
module_param(enable_hooks, bool, 0000);
unsigned int nf_conntrack_net_id __read_mostly;
#ifdef CONFIG_NF_CONNTRACK_PROCFS
void
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l4proto *l4proto)
{
switch (tuple->src.l3num) {
case NFPROTO_IPV4:
seq_printf(s, "src=%pI4 dst=%pI4 ",
&tuple->src.u3.ip, &tuple->dst.u3.ip);
break;
case NFPROTO_IPV6:
seq_printf(s, "src=%pI6 dst=%pI6 ",
tuple->src.u3.ip6, tuple->dst.u3.ip6);
break;
default:
break;
}
switch (l4proto->l4proto) {
case IPPROTO_ICMP:
seq_printf(s, "type=%u code=%u id=%u ",
tuple->dst.u.icmp.type,
tuple->dst.u.icmp.code,
ntohs(tuple->src.u.icmp.id));
break;
case IPPROTO_TCP:
seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.tcp.port),
ntohs(tuple->dst.u.tcp.port));
break;
case IPPROTO_UDPLITE:
case IPPROTO_UDP:
seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.udp.port),
ntohs(tuple->dst.u.udp.port));
break;
case IPPROTO_DCCP:
seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.dccp.port),
ntohs(tuple->dst.u.dccp.port));
break;
case IPPROTO_SCTP:
seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.sctp.port),
ntohs(tuple->dst.u.sctp.port));
break;
case IPPROTO_ICMPV6:
seq_printf(s, "type=%u code=%u id=%u ",
tuple->dst.u.icmp.type,
tuple->dst.u.icmp.code,
ntohs(tuple->src.u.icmp.id));
break;
case IPPROTO_GRE:
seq_printf(s, "srckey=0x%x dstkey=0x%x ",
ntohs(tuple->src.u.gre.key),
ntohs(tuple->dst.u.gre.key));
break;
default:
break;
}
}
EXPORT_SYMBOL_GPL(print_tuple);
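/* seq_file iterator state for /proc/net/nf_conntrack: walks the
* conntrack hash table bucket by bucket under RCU.
*/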
struct ct_iter_state {
struct seq_net_private p;
struct hlist_nulls_head *hash;
unsigned int htable_size;
unsigned int bucket;
u_int64_t time_now;
};
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
struct ct_iter_state *st = seq->private;
struct hlist_nulls_node *n;
for (st->bucket = 0;
st->bucket < st->htable_size;
st->bucket++) {
n = rcu_dereference(
hlist_nulls_first_rcu(&st->hash[st->bucket]));
if (!is_a_nulls(n))
return n;
}
return NULL;
}
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct hlist_nulls_node *head)
{
struct ct_iter_state *st = seq->private;
head = rcu_dereference(hlist_nulls_next_rcu(head));
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= st->htable_size)
return NULL;
}
head = rcu_dereference(
hlist_nulls_first_rcu(&st->hash[st->bucket]));
}
return head;
}
static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
struct hlist_nulls_node *head = ct_get_first(seq);
if (head)
while (pos && (head = ct_get_next(seq, head)))
pos--;
return pos ? NULL : head;
}
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct ct_iter_state *st = seq->private;
st->time_now = ktime_get_real_ns();
rcu_read_lock();
nf_conntrack_get_ht(&st->hash, &st->htable_size);
return ct_get_idx(seq, *pos);
}
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
return ct_get_next(s, v);
}
static void ct_seq_stop(struct seq_file *s, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
int ret;
u32 len;
char *secctx;
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
return;
seq_printf(s, "secctx=%s ", secctx);
security_release_secctx(secctx, len);
}
#else
static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
}
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
int dir)
{
const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
if (zone->dir != dir)
return;
switch (zone->dir) {
case NF_CT_DEFAULT_ZONE_DIR:
seq_printf(s, "zone=%u ", zone->id);
break;
case NF_CT_ZONE_DIR_ORIG:
seq_printf(s, "zone-orig=%u ", zone->id);
break;
case NF_CT_ZONE_DIR_REPL:
seq_printf(s, "zone-reply=%u ", zone->id);
break;
default:
break;
}
}
#else
static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
int dir)
{
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
{
struct ct_iter_state *st = s->private;
struct nf_conn_tstamp *tstamp;
s64 delta_time;
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
delta_time = st->time_now - tstamp->start;
if (delta_time > 0)
delta_time = div_s64(delta_time, NSEC_PER_SEC);
else
delta_time = 0;
seq_printf(s, "delta-time=%llu ",
(unsigned long long)delta_time);
}
return;
}
#else
static inline void
ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
{
}
#endif
static const char* l3proto_name(u16 proto)
{
switch (proto) {
case AF_INET: return "ipv4";
case AF_INET6: return "ipv6";
}
return "unknown";
}
static const char* l4proto_name(u16 proto)
{
switch (proto) {
case IPPROTO_ICMP: return "icmp";
case IPPROTO_TCP: return "tcp";
case IPPROTO_UDP: return "udp";
case IPPROTO_DCCP: return "dccp";
case IPPROTO_GRE: return "gre";
case IPPROTO_SCTP: return "sctp";
case IPPROTO_UDPLITE: return "udplite";
case IPPROTO_ICMPV6: return "icmpv6";
}
return "unknown";
}
static void
seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
{
struct nf_conn_acct *acct;
struct nf_conn_counter *counter;
acct = nf_conn_acct_find(ct);
if (!acct)
return;
counter = acct->counter;
seq_printf(s, "packets=%llu bytes=%llu ",
(unsigned long long)atomic64_read(&counter[dir].packets),
(unsigned long long)atomic64_read(&counter[dir].bytes));
}
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_tuple_hash *hash = v;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
const struct nf_conntrack_l4proto *l4proto;
struct net *net = seq_file_net(s);
int ret = 0;
WARN_ON(!ct);
if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use)))
return 0;
/* load ->status after refcount increase */
smp_acquire__after_ctrl_dep();
if (nf_ct_should_gc(ct)) {
nf_ct_kill(ct);
goto release;
}
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
goto release;
if (!net_eq(nf_ct_net(ct), net))
goto release;
l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
ret = -ENOSPC;
seq_printf(s, "%-8s %u %-8s %u ",
l3proto_name(nf_ct_l3num(ct)), nf_ct_l3num(ct),
l4proto_name(l4proto->l4proto), nf_ct_protonum(ct));
if (!test_bit(IPS_OFFLOAD_BIT, &ct->status))
seq_printf(s, "%ld ", nf_ct_expires(ct) / HZ);
if (l4proto->print_conntrack)
l4proto->print_conntrack(s, ct);
print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
l4proto);
ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
if (seq_has_overflowed(s))
goto release;
seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL);
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
seq_puts(s, "[UNREPLIED] ");
print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l4proto);
ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
seq_print_acct(s, ct, IP_CT_DIR_REPLY);
if (test_bit(IPS_HW_OFFLOAD_BIT, &ct->status))
seq_puts(s, "[HW_OFFLOAD] ");
else if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
seq_puts(s, "[OFFLOAD] ");
else if (test_bit(IPS_ASSURED_BIT, &ct->status))
seq_puts(s, "[ASSURED] ");
if (seq_has_overflowed(s))
goto release;
#if defined(CONFIG_NF_CONNTRACK_MARK)
seq_printf(s, "mark=%u ", READ_ONCE(ct->mark));
#endif
ct_show_secctx(s, ct);
ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
ct_show_delta_time(s, ct);
seq_printf(s, "use=%u\n", refcount_read(&ct->ct_general.use));
if (seq_has_overflowed(s))
goto release;
ret = 0;
release:
nf_ct_put(ct);
return ret;
}
static const struct seq_operations ct_seq_ops = {
.start = ct_seq_start,
.next = ct_seq_next,
.stop = ct_seq_stop,
.show = ct_seq_show
};
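/* /proc/net/stat/nf_conntrack: one line of per-CPU statistics for each
* possible CPU, preceded by a header row.
*/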
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
return NULL;
}
static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
(*pos)++;
return NULL;
}
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_net(seq);
const struct ip_conntrack_stat *st = v;
unsigned int nr_conntracks;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "entries clashres found new invalid ignore delete chainlength insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
return 0;
}
nr_conntracks = nf_conntrack_count(net);
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
"%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
st->clash_resolve,
st->found,
0,
st->invalid,
0,
0,
st->chaintoolong,
st->insert,
st->insert_failed,
st->drop,
st->early_drop,
st->error,
st->expect_new,
st->expect_create,
st->expect_delete,
st->search_restart
);
return 0;
}
static const struct seq_operations ct_cpu_seq_ops = {
.start = ct_cpu_seq_start,
.next = ct_cpu_seq_next,
.stop = ct_cpu_seq_stop,
.show = ct_cpu_seq_show,
};
static int nf_conntrack_standalone_init_proc(struct net *net)
{
struct proc_dir_entry *pde;
kuid_t root_uid;
kgid_t root_gid;
pde = proc_create_net("nf_conntrack", 0440, net->proc_net, &ct_seq_ops,
sizeof(struct ct_iter_state));
if (!pde)
goto out_nf_conntrack;
root_uid = make_kuid(net->user_ns, 0);
root_gid = make_kgid(net->user_ns, 0);
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(pde, root_uid, root_gid);
pde = proc_create_net("nf_conntrack", 0444, net->proc_net_stat,
&ct_cpu_seq_ops, sizeof(struct seq_net_private));
if (!pde)
goto out_stat_nf_conntrack;
return 0;
out_stat_nf_conntrack:
remove_proc_entry("nf_conntrack", net->proc_net);
out_nf_conntrack:
return -ENOMEM;
}
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
remove_proc_entry("nf_conntrack", net->proc_net_stat);
remove_proc_entry("nf_conntrack", net->proc_net);
}
#else
static int nf_conntrack_standalone_init_proc(struct net *net)
{
return 0;
}
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
}
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
u32 nf_conntrack_count(const struct net *net)
{
const struct nf_conntrack_net *cnet = nf_ct_pernet(net);
return atomic_read(&cnet->count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_count);
/* Sysctl support */
#ifdef CONFIG_SYSCTL
/* size the user *wants to set */
static unsigned int nf_conntrack_htable_size_user __read_mostly;
static int
nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
/* module_param hashsize could have changed value */
nf_conntrack_htable_size_user = nf_conntrack_htable_size;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret < 0 || !write)
return ret;
/* update ret, we might not be able to satisfy request */
ret = nf_conntrack_hash_resize(nf_conntrack_htable_size_user);
/* update it to the actual value used by conntrack */
nf_conntrack_htable_size_user = nf_conntrack_htable_size;
return ret;
}
static struct ctl_table_header *nf_ct_netfilter_header;
enum nf_ct_sysctl_index {
NF_SYSCTL_CT_MAX,
NF_SYSCTL_CT_COUNT,
NF_SYSCTL_CT_BUCKETS,
NF_SYSCTL_CT_CHECKSUM,
NF_SYSCTL_CT_LOG_INVALID,
NF_SYSCTL_CT_EXPECT_MAX,
NF_SYSCTL_CT_ACCT,
#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_SYSCTL_CT_EVENTS,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
NF_SYSCTL_CT_TIMESTAMP,
#endif
NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD,
#endif
NF_SYSCTL_CT_PROTO_TCP_LOOSE,
NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST,
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD,
#endif
NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
#ifdef CONFIG_NF_CT_PROTO_SCTP
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND,
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN,
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN,
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ,
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING,
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT,
NF_SYSCTL_CT_PROTO_DCCP_LOOSE,
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
#endif
#ifdef CONFIG_LWTUNNEL
NF_SYSCTL_CT_LWTUNNEL,
#endif
__NF_SYSCTL_CT_LAST_SYSCTL,
};
#define NF_SYSCTL_CT_LAST_SYSCTL (__NF_SYSCTL_CT_LAST_SYSCTL + 1)
static struct ctl_table nf_ct_sysctl_table[] = {
[NF_SYSCTL_CT_MAX] = {
.procname = "nf_conntrack_max",
.data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
[NF_SYSCTL_CT_COUNT] = {
.procname = "nf_conntrack_count",
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
[NF_SYSCTL_CT_BUCKETS] = {
.procname = "nf_conntrack_buckets",
.data = &nf_conntrack_htable_size_user,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = nf_conntrack_hash_sysctl,
},
[NF_SYSCTL_CT_CHECKSUM] = {
.procname = "nf_conntrack_checksum",
.data = &init_net.ct.sysctl_checksum,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
[NF_SYSCTL_CT_LOG_INVALID] = {
.procname = "nf_conntrack_log_invalid",
.data = &init_net.ct.sysctl_log_invalid,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
[NF_SYSCTL_CT_EXPECT_MAX] = {
.procname = "nf_conntrack_expect_max",
.data = &nf_ct_expect_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
[NF_SYSCTL_CT_ACCT] = {
.procname = "nf_conntrack_acct",
.data = &init_net.ct.sysctl_acct,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#ifdef CONFIG_NF_CONNTRACK_EVENTS
[NF_SYSCTL_CT_EVENTS] = {
.procname = "nf_conntrack_events",
.data = &init_net.ct.sysctl_events,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
[NF_SYSCTL_CT_TIMESTAMP] = {
.procname = "nf_conntrack_timestamp",
.data = &init_net.ct.sysctl_tstamp,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif
[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC] = {
.procname = "nf_conntrack_generic_timeout",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT] = {
.procname = "nf_conntrack_tcp_timeout_syn_sent",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV] = {
.procname = "nf_conntrack_tcp_timeout_syn_recv",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED] = {
.procname = "nf_conntrack_tcp_timeout_established",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT] = {
.procname = "nf_conntrack_tcp_timeout_fin_wait",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT] = {
.procname = "nf_conntrack_tcp_timeout_close_wait",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK] = {
.procname = "nf_conntrack_tcp_timeout_last_ack",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT] = {
.procname = "nf_conntrack_tcp_timeout_time_wait",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE] = {
.procname = "nf_conntrack_tcp_timeout_close",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS] = {
.procname = "nf_conntrack_tcp_timeout_max_retrans",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK] = {
.procname = "nf_conntrack_tcp_timeout_unacknowledged",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD] = {
.procname = "nf_flowtable_tcp_timeout",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#endif
[NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
.procname = "nf_conntrack_tcp_loose",
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
[NF_SYSCTL_CT_PROTO_TCP_LIBERAL] = {
.procname = "nf_conntrack_tcp_be_liberal",
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
[NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = {
.procname = "nf_conntrack_tcp_ignore_invalid_rst",
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
[NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = {
.procname = "nf_conntrack_tcp_max_retrans",
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP] = {
.procname = "nf_conntrack_udp_timeout",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM] = {
.procname = "nf_conntrack_udp_timeout_stream",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = {
.procname = "nf_flowtable_udp_timeout",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#endif
[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
.procname = "nf_conntrack_icmp_timeout",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6] = {
.procname = "nf_conntrack_icmpv6_timeout",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#ifdef CONFIG_NF_CT_PROTO_SCTP
[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED] = {
.procname = "nf_conntrack_sctp_timeout_closed",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT] = {
.procname = "nf_conntrack_sctp_timeout_cookie_wait",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED] = {
.procname = "nf_conntrack_sctp_timeout_cookie_echoed",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED] = {
.procname = "nf_conntrack_sctp_timeout_established",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT] = {
.procname = "nf_conntrack_sctp_timeout_shutdown_sent",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD] = {
.procname = "nf_conntrack_sctp_timeout_shutdown_recd",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = {
.procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT] = {
.procname = "nf_conntrack_sctp_timeout_heartbeat_sent",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
.procname = "nf_conntrack_dccp_timeout_request",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND] = {
.procname = "nf_conntrack_dccp_timeout_respond",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN] = {
.procname = "nf_conntrack_dccp_timeout_partopen",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN] = {
.procname = "nf_conntrack_dccp_timeout_open",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ] = {
.procname = "nf_conntrack_dccp_timeout_closereq",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING] = {
.procname = "nf_conntrack_dccp_timeout_closing",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT] = {
.procname = "nf_conntrack_dccp_timeout_timewait",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_DCCP_LOOSE] = {
.procname = "nf_conntrack_dccp_loose",
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE] = {
.procname = "nf_conntrack_gre_timeout",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM] = {
.procname = "nf_conntrack_gre_timeout_stream",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#endif
#ifdef CONFIG_LWTUNNEL
[NF_SYSCTL_CT_LWTUNNEL] = {
.procname = "nf_hooks_lwtunnel",
.data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = nf_hooks_lwtunnel_sysctl_handler,
},
#endif
{}
};
static struct ctl_table nf_ct_netfilter_table[] = {
{
.procname = "nf_conntrack_max",
.data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
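/* The following helpers point the duplicated per-netns sysctl table
* entries at this namespace's protocol timeout storage.
*/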
static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
struct ctl_table *table)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
#define XASSIGN(XNAME, tn) \
table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ ## XNAME].data = \
&(tn)->timeouts[TCP_CONNTRACK_ ## XNAME]
XASSIGN(SYN_SENT, tn);
XASSIGN(SYN_RECV, tn);
XASSIGN(ESTABLISHED, tn);
XASSIGN(FIN_WAIT, tn);
XASSIGN(CLOSE_WAIT, tn);
XASSIGN(LAST_ACK, tn);
XASSIGN(TIME_WAIT, tn);
XASSIGN(CLOSE, tn);
XASSIGN(RETRANS, tn);
XASSIGN(UNACK, tn);
#undef XASSIGN
#define XASSIGN(XNAME, rval) \
table[NF_SYSCTL_CT_PROTO_TCP_ ## XNAME].data = (rval)
XASSIGN(LOOSE, &tn->tcp_loose);
XASSIGN(LIBERAL, &tn->tcp_be_liberal);
XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst);
#undef XASSIGN
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout;
#endif
}
static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
struct ctl_table *table)
{
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net *sn = nf_sctp_pernet(net);
#define XASSIGN(XNAME, sn) \
table[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ ## XNAME].data = \
&(sn)->timeouts[SCTP_CONNTRACK_ ## XNAME]
XASSIGN(CLOSED, sn);
XASSIGN(COOKIE_WAIT, sn);
XASSIGN(COOKIE_ECHOED, sn);
XASSIGN(ESTABLISHED, sn);
XASSIGN(SHUTDOWN_SENT, sn);
XASSIGN(SHUTDOWN_RECD, sn);
XASSIGN(SHUTDOWN_ACK_SENT, sn);
XASSIGN(HEARTBEAT_SENT, sn);
#undef XASSIGN
#endif
}
static void nf_conntrack_standalone_init_dccp_sysctl(struct net *net,
struct ctl_table *table)
{
#ifdef CONFIG_NF_CT_PROTO_DCCP
struct nf_dccp_net *dn = nf_dccp_pernet(net);
#define XASSIGN(XNAME, dn) \
table[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_ ## XNAME].data = \
&(dn)->dccp_timeout[CT_DCCP_ ## XNAME]
XASSIGN(REQUEST, dn);
XASSIGN(RESPOND, dn);
XASSIGN(PARTOPEN, dn);
XASSIGN(OPEN, dn);
XASSIGN(CLOSEREQ, dn);
XASSIGN(CLOSING, dn);
XASSIGN(TIMEWAIT, dn);
#undef XASSIGN
table[NF_SYSCTL_CT_PROTO_DCCP_LOOSE].data = &dn->dccp_loose;
#endif
}
static void nf_conntrack_standalone_init_gre_sysctl(struct net *net,
struct ctl_table *table)
{
#ifdef CONFIG_NF_CT_PROTO_GRE
struct nf_gre_net *gn = nf_gre_pernet(net);
table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE].data = &gn->timeouts[GRE_CT_UNREPLIED];
table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM].data = &gn->timeouts[GRE_CT_REPLIED];
#endif
}
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct nf_udp_net *un = nf_udp_pernet(net);
struct ctl_table *table;
BUILD_BUG_ON(ARRAY_SIZE(nf_ct_sysctl_table) != NF_SYSCTL_CT_LAST_SYSCTL);
table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
GFP_KERNEL);
if (!table)
return -ENOMEM;
table[NF_SYSCTL_CT_COUNT].data = &cnet->count;
table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
#endif
table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED];
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
#endif
nf_conntrack_standalone_init_tcp_sysctl(net, table);
nf_conntrack_standalone_init_sctp_sysctl(net, table);
nf_conntrack_standalone_init_dccp_sysctl(net, table);
nf_conntrack_standalone_init_gre_sysctl(net, table);
/* Don't allow non-init_net ns to alter global sysctls */
if (!net_eq(&init_net, net)) {
table[NF_SYSCTL_CT_MAX].mode = 0444;
table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
}
cnet->sysctl_header = register_net_sysctl_sz(net, "net/netfilter",
table,
ARRAY_SIZE(nf_ct_sysctl_table));
if (!cnet->sysctl_header)
goto out_unregister_netfilter;
return 0;
out_unregister_netfilter:
kfree(table);
return -ENOMEM;
}
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct ctl_table *table;
table = cnet->sysctl_header->ctl_table_arg;
unregister_net_sysctl_table(cnet->sysctl_header);
kfree(table);
}
#else
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
return 0;
}
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */
static void nf_conntrack_fini_net(struct net *net)
{
if (enable_hooks)
nf_ct_netns_put(net, NFPROTO_INET);
nf_conntrack_standalone_fini_proc(net);
nf_conntrack_standalone_fini_sysctl(net);
}
static int nf_conntrack_pernet_init(struct net *net)
{
int ret;
net->ct.sysctl_checksum = 1;
ret = nf_conntrack_standalone_init_sysctl(net);
if (ret < 0)
return ret;
ret = nf_conntrack_standalone_init_proc(net);
if (ret < 0)
goto out_proc;
ret = nf_conntrack_init_net(net);
if (ret < 0)
goto out_init_net;
if (enable_hooks) {
ret = nf_ct_netns_get(net, NFPROTO_INET);
if (ret < 0)
goto out_hooks;
}
return 0;
out_hooks:
nf_conntrack_cleanup_net(net);
out_init_net:
nf_conntrack_standalone_fini_proc(net);
out_proc:
nf_conntrack_standalone_fini_sysctl(net);
return ret;
}
static void nf_conntrack_pernet_exit(struct list_head *net_exit_list)
{
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list)
nf_conntrack_fini_net(net);
nf_conntrack_cleanup_net_list(net_exit_list);
}
static struct pernet_operations nf_conntrack_net_ops = {
.init = nf_conntrack_pernet_init,
.exit_batch = nf_conntrack_pernet_exit,
.id = &nf_conntrack_net_id,
.size = sizeof(struct nf_conntrack_net),
};
static int __init nf_conntrack_standalone_init(void)
{
int ret = nf_conntrack_init_start();
if (ret < 0)
goto out_start;
BUILD_BUG_ON(NFCT_INFOMASK <= IP_CT_NUMBER);
#ifdef CONFIG_SYSCTL
nf_ct_netfilter_header =
register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
if (!nf_ct_netfilter_header) {
pr_err("nf_conntrack: can't register to sysctl.\n");
ret = -ENOMEM;
goto out_sysctl;
}
nf_conntrack_htable_size_user = nf_conntrack_htable_size;
#endif
nf_conntrack_init_end();
ret = register_pernet_subsys(&nf_conntrack_net_ops);
if (ret < 0)
goto out_pernet;
return 0;
out_pernet:
#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(nf_ct_netfilter_header);
out_sysctl:
#endif
nf_conntrack_cleanup_end();
out_start:
return ret;
}
static void __exit nf_conntrack_standalone_fini(void)
{
nf_conntrack_cleanup_start();
unregister_pernet_subsys(&nf_conntrack_net_ops);
#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(nf_ct_netfilter_header);
#endif
nf_conntrack_cleanup_end();
}
module_init(nf_conntrack_standalone_init);
module_exit(nf_conntrack_standalone_fini);
| linux-master | net/netfilter/nf_conntrack_standalone.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/vmalloc.h>
#include <linux/rhashtable.h>
#include <linux/audit.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
unsigned int nf_tables_net_id __read_mostly;
static LIST_HEAD(nf_tables_expressions);
static LIST_HEAD(nf_tables_objects);
static LIST_HEAD(nf_tables_flowtables);
static LIST_HEAD(nf_tables_destroy_list);
static LIST_HEAD(nf_tables_gc_list);
static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
static DEFINE_SPINLOCK(nf_tables_gc_list_lock);
enum {
NFT_VALIDATE_SKIP = 0,
NFT_VALIDATE_NEED,
NFT_VALIDATE_DO,
};
static struct rhltable nft_objname_ht;
static u32 nft_chain_hash(const void *data, u32 len, u32 seed);
static u32 nft_chain_hash_obj(const void *data, u32 len, u32 seed);
static int nft_chain_hash_cmp(struct rhashtable_compare_arg *, const void *);
static u32 nft_objname_hash(const void *data, u32 len, u32 seed);
static u32 nft_objname_hash_obj(const void *data, u32 len, u32 seed);
static int nft_objname_hash_cmp(struct rhashtable_compare_arg *, const void *);
static const struct rhashtable_params nft_chain_ht_params = {
.head_offset = offsetof(struct nft_chain, rhlhead),
.key_offset = offsetof(struct nft_chain, name),
.hashfn = nft_chain_hash,
.obj_hashfn = nft_chain_hash_obj,
.obj_cmpfn = nft_chain_hash_cmp,
.automatic_shrinking = true,
};
static const struct rhashtable_params nft_objname_ht_params = {
.head_offset = offsetof(struct nft_object, rhlhead),
.key_offset = offsetof(struct nft_object, key),
.hashfn = nft_objname_hash,
.obj_hashfn = nft_objname_hash_obj,
.obj_cmpfn = nft_objname_hash_cmp,
.automatic_shrinking = true,
};
struct nft_audit_data {
struct nft_table *table;
int entries;
int op;
struct list_head list;
};
static const u8 nft2audit_op[NFT_MSG_MAX] = { // enum nf_tables_msg_types
[NFT_MSG_NEWTABLE] = AUDIT_NFT_OP_TABLE_REGISTER,
[NFT_MSG_GETTABLE] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_DELTABLE] = AUDIT_NFT_OP_TABLE_UNREGISTER,
[NFT_MSG_NEWCHAIN] = AUDIT_NFT_OP_CHAIN_REGISTER,
[NFT_MSG_GETCHAIN] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_DELCHAIN] = AUDIT_NFT_OP_CHAIN_UNREGISTER,
[NFT_MSG_NEWRULE] = AUDIT_NFT_OP_RULE_REGISTER,
[NFT_MSG_GETRULE] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_DELRULE] = AUDIT_NFT_OP_RULE_UNREGISTER,
[NFT_MSG_NEWSET] = AUDIT_NFT_OP_SET_REGISTER,
[NFT_MSG_GETSET] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_DELSET] = AUDIT_NFT_OP_SET_UNREGISTER,
[NFT_MSG_NEWSETELEM] = AUDIT_NFT_OP_SETELEM_REGISTER,
[NFT_MSG_GETSETELEM] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_DELSETELEM] = AUDIT_NFT_OP_SETELEM_UNREGISTER,
[NFT_MSG_NEWGEN] = AUDIT_NFT_OP_GEN_REGISTER,
[NFT_MSG_GETGEN] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_TRACE] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_NEWOBJ] = AUDIT_NFT_OP_OBJ_REGISTER,
[NFT_MSG_GETOBJ] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_DELOBJ] = AUDIT_NFT_OP_OBJ_UNREGISTER,
[NFT_MSG_GETOBJ_RESET] = AUDIT_NFT_OP_OBJ_RESET,
[NFT_MSG_NEWFLOWTABLE] = AUDIT_NFT_OP_FLOWTABLE_REGISTER,
[NFT_MSG_GETFLOWTABLE] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_DELFLOWTABLE] = AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
[NFT_MSG_GETSETELEM_RESET] = AUDIT_NFT_OP_SETELEM_RESET,
};
static void nft_validate_state_update(struct nft_table *table, u8 new_validate_state)
{
switch (table->validate_state) {
case NFT_VALIDATE_SKIP:
WARN_ON_ONCE(new_validate_state == NFT_VALIDATE_DO);
break;
case NFT_VALIDATE_NEED:
break;
case NFT_VALIDATE_DO:
if (new_validate_state == NFT_VALIDATE_NEED)
return;
}
table->validate_state = new_validate_state;
}
static void nf_tables_trans_destroy_work(struct work_struct *w);
static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work);
static void nft_trans_gc_work(struct work_struct *work);
static DECLARE_WORK(trans_gc_work, nft_trans_gc_work);
static void nft_ctx_init(struct nft_ctx *ctx,
struct net *net,
const struct sk_buff *skb,
const struct nlmsghdr *nlh,
u8 family,
struct nft_table *table,
struct nft_chain *chain,
const struct nlattr * const *nla)
{
ctx->net = net;
ctx->family = family;
ctx->level = 0;
ctx->table = table;
ctx->chain = chain;
ctx->nla = nla;
ctx->portid = NETLINK_CB(skb).portid;
ctx->report = nlmsg_report(nlh);
ctx->flags = nlh->nlmsg_flags;
ctx->seq = nlh->nlmsg_seq;
}
static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
int msg_type, u32 size, gfp_t gfp)
{
struct nft_trans *trans;
trans = kzalloc(sizeof(struct nft_trans) + size, gfp);
if (trans == NULL)
return NULL;
INIT_LIST_HEAD(&trans->list);
INIT_LIST_HEAD(&trans->binding_list);
trans->msg_type = msg_type;
trans->ctx = *ctx;
return trans;
}
static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx,
int msg_type, u32 size)
{
return nft_trans_alloc_gfp(ctx, msg_type, size, GFP_KERNEL);
}
static void nft_trans_list_del(struct nft_trans *trans)
{
list_del(&trans->list);
list_del(&trans->binding_list);
}
static void nft_trans_destroy(struct nft_trans *trans)
{
nft_trans_list_del(trans);
kfree(trans);
}
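/* Anonymous sets are bound to the rule that references them: walk the
* current transaction backwards and flag the matching NEWSET and
* NEWSETELEM entries so commit/abort handling knows the set is owned
* by a rule.
*/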
static void __nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set,
bool bind)
{
struct nftables_pernet *nft_net;
struct net *net = ctx->net;
struct nft_trans *trans;
if (!nft_set_is_anonymous(set))
return;
nft_net = nft_pernet(net);
list_for_each_entry_reverse(trans, &nft_net->commit_list, list) {
switch (trans->msg_type) {
case NFT_MSG_NEWSET:
if (nft_trans_set(trans) == set)
nft_trans_set_bound(trans) = bind;
break;
case NFT_MSG_NEWSETELEM:
if (nft_trans_elem_set(trans) == set)
nft_trans_elem_set_bound(trans) = bind;
break;
}
}
}
static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
{
return __nft_set_trans_bind(ctx, set, true);
}
static void nft_set_trans_unbind(const struct nft_ctx *ctx, struct nft_set *set)
{
return __nft_set_trans_bind(ctx, set, false);
}
static void __nft_chain_trans_bind(const struct nft_ctx *ctx,
struct nft_chain *chain, bool bind)
{
struct nftables_pernet *nft_net;
struct net *net = ctx->net;
struct nft_trans *trans;
if (!nft_chain_binding(chain))
return;
nft_net = nft_pernet(net);
list_for_each_entry_reverse(trans, &nft_net->commit_list, list) {
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain(trans) == chain)
nft_trans_chain_bound(trans) = bind;
break;
case NFT_MSG_NEWRULE:
if (trans->ctx.chain == chain)
nft_trans_rule_bound(trans) = bind;
break;
}
}
}
static void nft_chain_trans_bind(const struct nft_ctx *ctx,
struct nft_chain *chain)
{
__nft_chain_trans_bind(ctx, chain, true);
}
int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain)
{
if (!nft_chain_binding(chain))
return 0;
if (nft_chain_binding(ctx->chain))
return -EOPNOTSUPP;
if (chain->bound)
return -EBUSY;
if (!nft_use_inc(&chain->use))
return -EMFILE;
chain->bound = true;
nft_chain_trans_bind(ctx, chain);
return 0;
}
void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain)
{
__nft_chain_trans_bind(ctx, chain, false);
}
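
/* Register every hook in the list; unwind already registered ones on error. */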
static int nft_netdev_register_hooks(struct net *net,
struct list_head *hook_list)
{
struct nft_hook *hook;
int err, j;
j = 0;
list_for_each_entry(hook, hook_list, list) {
err = nf_register_net_hook(net, &hook->ops);
if (err < 0)
goto err_register;
j++;
}
return 0;
err_register:
list_for_each_entry(hook, hook_list, list) {
if (j-- <= 0)
break;
nf_unregister_net_hook(net, &hook->ops);
}
return err;
}
static void nft_netdev_unregister_hooks(struct net *net,
struct list_head *hook_list,
bool release_netdev)
{
struct nft_hook *hook, *next;
list_for_each_entry_safe(hook, next, hook_list, list) {
nf_unregister_net_hook(net, &hook->ops);
if (release_netdev) {
list_del(&hook->list);
kfree_rcu(hook, rcu);
}
}
}
static int nf_tables_register_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
{
struct nft_base_chain *basechain;
const struct nf_hook_ops *ops;
if (table->flags & NFT_TABLE_F_DORMANT ||
!nft_is_base_chain(chain))
return 0;
basechain = nft_base_chain(chain);
ops = &basechain->ops;
if (basechain->type->ops_register)
return basechain->type->ops_register(net, ops);
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
return nft_netdev_register_hooks(net, &basechain->hook_list);
return nf_register_net_hook(net, &basechain->ops);
}
static void __nf_tables_unregister_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain,
bool release_netdev)
{
struct nft_base_chain *basechain;
const struct nf_hook_ops *ops;
if (table->flags & NFT_TABLE_F_DORMANT ||
!nft_is_base_chain(chain))
return;
basechain = nft_base_chain(chain);
ops = &basechain->ops;
if (basechain->type->ops_unregister)
return basechain->type->ops_unregister(net, ops);
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
nft_netdev_unregister_hooks(net, &basechain->hook_list,
release_netdev);
else
nf_unregister_net_hook(net, &basechain->ops);
}
static void nf_tables_unregister_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
{
return __nf_tables_unregister_hook(net, table, chain, false);
}
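
/*
 * Queue a transaction on the per-netns commit list. New anonymous sets and
 * new binding chains are additionally tracked on the binding list.
 */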
static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
{
struct nftables_pernet *nft_net = nft_pernet(net);
switch (trans->msg_type) {
case NFT_MSG_NEWSET:
if (!nft_trans_set_update(trans) &&
nft_set_is_anonymous(nft_trans_set(trans)))
list_add_tail(&trans->binding_list, &nft_net->binding_list);
break;
case NFT_MSG_NEWCHAIN:
if (!nft_trans_chain_update(trans) &&
nft_chain_binding(nft_trans_chain(trans)))
list_add_tail(&trans->binding_list, &nft_net->binding_list);
break;
}
list_add_tail(&trans->list, &nft_net->commit_list);
}
static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
{
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table));
if (trans == NULL)
return -ENOMEM;
if (msg_type == NFT_MSG_NEWTABLE)
nft_activate_next(ctx->net, ctx->table);
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
static int nft_deltable(struct nft_ctx *ctx)
{
int err;
err = nft_trans_table_add(ctx, NFT_MSG_DELTABLE);
if (err < 0)
return err;
nft_deactivate_next(ctx->net, ctx->table);
return err;
}
static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
{
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
if (trans == NULL)
return ERR_PTR(-ENOMEM);
if (msg_type == NFT_MSG_NEWCHAIN) {
nft_activate_next(ctx->net, ctx->chain);
if (ctx->nla[NFTA_CHAIN_ID]) {
nft_trans_chain_id(trans) =
ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID]));
}
}
nft_trans_chain(trans) = ctx->chain;
nft_trans_commit_list_add_tail(ctx->net, trans);
return trans;
}
static int nft_delchain(struct nft_ctx *ctx)
{
struct nft_trans *trans;
trans = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN);
if (IS_ERR(trans))
return PTR_ERR(trans);
nft_use_dec(&ctx->table->use);
nft_deactivate_next(ctx->net, ctx->chain);
return 0;
}
void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule)
{
struct nft_expr *expr;
expr = nft_expr_first(rule);
while (nft_expr_more(rule, expr)) {
if (expr->ops->activate)
expr->ops->activate(ctx, expr);
expr = nft_expr_next(expr);
}
}
void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_rule *rule,
enum nft_trans_phase phase)
{
struct nft_expr *expr;
expr = nft_expr_first(rule);
while (nft_expr_more(rule, expr)) {
if (expr->ops->deactivate)
expr->ops->deactivate(ctx, expr, phase);
expr = nft_expr_next(expr);
}
}
static int
nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
{
/* You cannot delete the same rule twice */
if (nft_is_active_next(ctx->net, rule)) {
nft_deactivate_next(ctx->net, rule);
nft_use_dec(&ctx->chain->use);
return 0;
}
return -ENOENT;
}
static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
struct nft_rule *rule)
{
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule));
if (trans == NULL)
return NULL;
if (msg_type == NFT_MSG_NEWRULE && ctx->nla[NFTA_RULE_ID] != NULL) {
nft_trans_rule_id(trans) =
ntohl(nla_get_be32(ctx->nla[NFTA_RULE_ID]));
}
nft_trans_rule(trans) = rule;
nft_trans_commit_list_add_tail(ctx->net, trans);
return trans;
}
static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
{
struct nft_flow_rule *flow;
struct nft_trans *trans;
int err;
trans = nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule);
if (trans == NULL)
return -ENOMEM;
if (ctx->chain->flags & NFT_CHAIN_HW_OFFLOAD) {
flow = nft_flow_rule_create(ctx->net, rule);
if (IS_ERR(flow)) {
nft_trans_destroy(trans);
return PTR_ERR(flow);
}
nft_trans_flow_rule(trans) = flow;
}
err = nf_tables_delrule_deactivate(ctx, rule);
if (err < 0) {
nft_trans_destroy(trans);
return err;
}
nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
return 0;
}
static int nft_delrule_by_chain(struct nft_ctx *ctx)
{
struct nft_rule *rule;
int err;
list_for_each_entry(rule, &ctx->chain->rules, list) {
if (!nft_is_active_next(ctx->net, rule))
continue;
err = nft_delrule(ctx, rule);
if (err < 0)
return err;
}
return 0;
}
static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
struct nft_set *set,
const struct nft_set_desc *desc)
{
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
if (trans == NULL)
return -ENOMEM;
if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) {
nft_trans_set_id(trans) =
ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
nft_activate_next(ctx->net, set);
}
nft_trans_set(trans) = set;
if (desc) {
nft_trans_set_update(trans) = true;
nft_trans_set_gc_int(trans) = desc->gc_int;
nft_trans_set_timeout(trans) = desc->timeout;
nft_trans_set_size(trans) = desc->size;
}
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
struct nft_set *set)
{
return __nft_trans_set_add(ctx, msg_type, set, NULL);
}
static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
struct nft_set_elem *elem)
{
nft_setelem_data_deactivate(ctx->net, set, elem);
return 0;
}
struct nft_set_elem_catchall {
struct list_head list;
struct rcu_head rcu;
void *elem;
};
static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
struct nft_set *set)
{
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set_elem_catchall *catchall;
struct nft_set_elem elem;
struct nft_set_ext *ext;
list_for_each_entry(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask))
continue;
elem.priv = catchall->elem;
nft_setelem_data_deactivate(ctx->net, set, &elem);
break;
}
}
static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
{
struct nft_set_iter iter = {
.genmask = nft_genmask_next(ctx->net),
.fn = nft_mapelem_deactivate,
};
set->ops->walk(ctx, set, &iter);
WARN_ON_ONCE(iter.err);
nft_map_catchall_deactivate(ctx, set);
}
static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
{
int err;
err = nft_trans_set_add(ctx, NFT_MSG_DELSET, set);
if (err < 0)
return err;
if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
nft_map_deactivate(ctx, set);
nft_deactivate_next(ctx->net, set);
nft_use_dec(&ctx->table->use);
return err;
}
static int nft_trans_obj_add(struct nft_ctx *ctx, int msg_type,
struct nft_object *obj)
{
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_obj));
if (trans == NULL)
return -ENOMEM;
if (msg_type == NFT_MSG_NEWOBJ)
nft_activate_next(ctx->net, obj);
nft_trans_obj(trans) = obj;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
{
int err;
err = nft_trans_obj_add(ctx, NFT_MSG_DELOBJ, obj);
if (err < 0)
return err;
nft_deactivate_next(ctx->net, obj);
nft_use_dec(&ctx->table->use);
return err;
}
static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
struct nft_flowtable *flowtable)
{
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type,
sizeof(struct nft_trans_flowtable));
if (trans == NULL)
return -ENOMEM;
if (msg_type == NFT_MSG_NEWFLOWTABLE)
nft_activate_next(ctx->net, flowtable);
INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
nft_trans_flowtable(trans) = flowtable;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
static int nft_delflowtable(struct nft_ctx *ctx,
struct nft_flowtable *flowtable)
{
int err;
err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
if (err < 0)
return err;
nft_deactivate_next(ctx->net, flowtable);
nft_use_dec(&ctx->table->use);
return err;
}
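
/*
 * Register tracking: remember which expression last populated a destination
 * register and how many 32-bit registers it spans, so that the reduce pass
 * can elide later expressions which would reload the same data.
 */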
static void __nft_reg_track_clobber(struct nft_regs_track *track, u8 dreg)
{
int i;
for (i = track->regs[dreg].num_reg; i > 0; i--)
__nft_reg_track_cancel(track, dreg - i);
}
static void __nft_reg_track_update(struct nft_regs_track *track,
const struct nft_expr *expr,
u8 dreg, u8 num_reg)
{
track->regs[dreg].selector = expr;
track->regs[dreg].bitwise = NULL;
track->regs[dreg].num_reg = num_reg;
}
void nft_reg_track_update(struct nft_regs_track *track,
const struct nft_expr *expr, u8 dreg, u8 len)
{
unsigned int regcount;
int i;
__nft_reg_track_clobber(track, dreg);
regcount = DIV_ROUND_UP(len, NFT_REG32_SIZE);
for (i = 0; i < regcount; i++, dreg++)
__nft_reg_track_update(track, expr, dreg, i);
}
EXPORT_SYMBOL_GPL(nft_reg_track_update);
void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len)
{
unsigned int regcount;
int i;
__nft_reg_track_clobber(track, dreg);
regcount = DIV_ROUND_UP(len, NFT_REG32_SIZE);
for (i = 0; i < regcount; i++, dreg++)
__nft_reg_track_cancel(track, dreg);
}
EXPORT_SYMBOL_GPL(nft_reg_track_cancel);
void __nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg)
{
track->regs[dreg].selector = NULL;
track->regs[dreg].bitwise = NULL;
track->regs[dreg].num_reg = 0;
}
EXPORT_SYMBOL_GPL(__nft_reg_track_cancel);
/*
* Tables
*/
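
/*
 * Look up a table by name and family in the given generation. If the table
 * has an owner, only the owning netlink portid may operate on it.
 */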
static struct nft_table *nft_table_lookup(const struct net *net,
const struct nlattr *nla,
u8 family, u8 genmask, u32 nlpid)
{
struct nftables_pernet *nft_net;
struct nft_table *table;
if (nla == NULL)
return ERR_PTR(-EINVAL);
nft_net = nft_pernet(net);
list_for_each_entry_rcu(table, &nft_net->tables, list,
lockdep_is_held(&nft_net->commit_mutex)) {
if (!nla_strcmp(nla, table->name) &&
table->family == family &&
nft_active_genmask(table, genmask)) {
if (nft_table_has_owner(table) &&
nlpid && table->nlpid != nlpid)
return ERR_PTR(-EPERM);
return table;
}
}
return ERR_PTR(-ENOENT);
}
static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
const struct nlattr *nla,
u8 genmask, u32 nlpid)
{
struct nftables_pernet *nft_net;
struct nft_table *table;
nft_net = nft_pernet(net);
list_for_each_entry(table, &nft_net->tables, list) {
if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
nft_active_genmask(table, genmask)) {
if (nft_table_has_owner(table) &&
nlpid && table->nlpid != nlpid)
return ERR_PTR(-EPERM);
return table;
}
}
return ERR_PTR(-ENOENT);
}
static inline u64 nf_tables_alloc_handle(struct nft_table *table)
{
return ++table->hgenerator;
}
static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
static const struct nft_chain_type *
__nft_chain_type_get(u8 family, enum nft_chain_types type)
{
if (family >= NFPROTO_NUMPROTO ||
type >= NFT_CHAIN_T_MAX)
return NULL;
return chain_type[family][type];
}
static const struct nft_chain_type *
__nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
{
const struct nft_chain_type *type;
int i;
for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
type = __nft_chain_type_get(family, i);
if (!type)
continue;
if (!nla_strcmp(nla, type->name))
return type;
}
return NULL;
}
struct nft_module_request {
struct list_head list;
char module[MODULE_NAME_LEN];
bool done;
};
#ifdef CONFIG_MODULES
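/*
 * Queue a module load request. The actual request_module() happens later,
 * outside of the transaction; returning -EAGAIN makes the caller abort and
 * replay the operation once the module load has been attempted.
 */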
__printf(2, 3) int nft_request_module(struct net *net, const char *fmt,
...)
{
char module_name[MODULE_NAME_LEN];
struct nftables_pernet *nft_net;
struct nft_module_request *req;
va_list args;
int ret;
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args);
if (ret >= MODULE_NAME_LEN)
return 0;
nft_net = nft_pernet(net);
list_for_each_entry(req, &nft_net->module_list, list) {
if (!strcmp(req->module, module_name)) {
if (req->done)
return 0;
/* A request to load this module already exists. */
return -EAGAIN;
}
}
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
req->done = false;
strscpy(req->module, module_name, MODULE_NAME_LEN);
list_add_tail(&req->list, &nft_net->module_list);
return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nft_request_module);
#endif
static void lockdep_nfnl_nft_mutex_not_held(void)
{
#ifdef CONFIG_PROVE_LOCKING
if (debug_locks)
WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
#endif
}
static const struct nft_chain_type *
nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
u8 family, bool autoload)
{
const struct nft_chain_type *type;
type = __nf_tables_chain_type_lookup(nla, family);
if (type != NULL)
return type;
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (autoload) {
if (nft_request_module(net, "nft-chain-%u-%.*s", family,
nla_len(nla),
(const char *)nla_data(nla)) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
return ERR_PTR(-ENOENT);
}
static __be16 nft_base_seq(const struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
return htons(nft_net->base_seq & 0xffff);
}
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
[NFTA_TABLE_NAME] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_TABLE_FLAGS] = { .type = NLA_U32 },
[NFTA_TABLE_HANDLE] = { .type = NLA_U64 },
[NFTA_TABLE_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN }
};
static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, int event, u32 flags,
int family, const struct nft_table *table)
{
struct nlmsghdr *nlh;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
NFNETLINK_V0, nft_base_seq(net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
NFTA_TABLE_PAD))
goto nla_put_failure;
if (event == NFT_MSG_DELTABLE) {
nlmsg_end(skb, nlh);
return 0;
}
if (nla_put_be32(skb, NFTA_TABLE_FLAGS,
htonl(table->flags & NFT_TABLE_F_MASK)))
goto nla_put_failure;
if (nft_table_has_owner(table) &&
nla_put_be32(skb, NFTA_TABLE_OWNER, htonl(table->nlpid)))
goto nla_put_failure;
if (table->udata) {
if (nla_put(skb, NFTA_TABLE_USERDATA, table->udlen, table->udata))
goto nla_put_failure;
}
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
return -1;
}
struct nftnl_skb_parms {
bool report;
};
#define NFT_CB(skb) (*(struct nftnl_skb_parms*)&((skb)->cb))
static void nft_notify_enqueue(struct sk_buff *skb, bool report,
struct list_head *notify_list)
{
NFT_CB(skb).report = report;
list_add_tail(&skb->list, notify_list);
}
static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
{
struct nftables_pernet *nft_net;
struct sk_buff *skb;
u16 flags = 0;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
event, flags, ctx->family, ctx->table);
if (err < 0) {
kfree_skb(skb);
goto err;
}
nft_net = nft_pernet(ctx->net);
nft_notify_enqueue(skb, ctx->report, &nft_net->notify_list);
return;
err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static int nf_tables_dump_tables(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct nftables_pernet *nft_net;
const struct nft_table *table;
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
rcu_read_lock();
nft_net = nft_pernet(net);
cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
if (idx < s_idx)
goto cont;
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
if (!nft_is_active(net, table))
continue;
if (nf_tables_fill_table_info(skb, net,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWTABLE, NLM_F_MULTI,
table->family, table) < 0)
goto done;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
done:
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
static int nft_netlink_dump_start_rcu(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *c)
{
int err;
if (!try_module_get(THIS_MODULE))
return -EINVAL;
rcu_read_unlock();
err = netlink_dump_start(nlsk, skb, nlh, c);
rcu_read_lock();
module_put(THIS_MODULE);
return err;
}
/* called with rcu_read_lock held */
static int nf_tables_gettable(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
const struct nft_table *table;
struct net *net = info->net;
struct sk_buff *skb2;
int err;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = nf_tables_dump_tables,
.module = THIS_MODULE,
};
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
table = nft_table_lookup(net, nla[NFTA_TABLE_NAME], family, genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_TABLE_NAME]);
return PTR_ERR(table);
}
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
return -ENOMEM;
err = nf_tables_fill_table_info(skb2, net, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq, NFT_MSG_NEWTABLE,
0, family, table);
if (err < 0)
goto err_fill_table_info;
return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
err_fill_table_info:
kfree_skb(skb2);
return err;
}
static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt)
{
struct nft_chain *chain;
u32 i = 0;
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_active_next(net, chain))
continue;
if (!nft_is_base_chain(chain))
continue;
if (cnt && i++ == cnt)
break;
nf_tables_unregister_hook(net, table, chain);
}
}
static int nf_tables_table_enable(struct net *net, struct nft_table *table)
{
struct nft_chain *chain;
int err, i = 0;
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_active_next(net, chain))
continue;
if (!nft_is_base_chain(chain))
continue;
err = nf_tables_register_hook(net, table, chain);
if (err < 0)
goto err_register_hooks;
i++;
}
return 0;
err_register_hooks:
if (i)
nft_table_disable(net, table, i);
return err;
}
static void nf_tables_table_disable(struct net *net, struct nft_table *table)
{
table->flags &= ~NFT_TABLE_F_DORMANT;
nft_table_disable(net, table, 0);
table->flags |= NFT_TABLE_F_DORMANT;
}
#define __NFT_TABLE_F_INTERNAL (NFT_TABLE_F_MASK + 1)
#define __NFT_TABLE_F_WAS_DORMANT (__NFT_TABLE_F_INTERNAL << 0)
#define __NFT_TABLE_F_WAS_AWAKEN (__NFT_TABLE_F_INTERNAL << 1)
#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
__NFT_TABLE_F_WAS_AWAKEN)
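
/*
 * Handle NFTA_TABLE_FLAGS updates on an existing table. The owner flag
 * cannot be set or cleared, and only a single dormant transition is allowed
 * per transaction. Waking a dormant table registers its base chain hooks
 * immediately; the internal WAS_DORMANT/WAS_AWAKEN flags remember the
 * original state so that an abort can restore it.
 */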
static int nf_tables_updtable(struct nft_ctx *ctx)
{
struct nft_trans *trans;
u32 flags;
int ret;
if (!ctx->nla[NFTA_TABLE_FLAGS])
return 0;
flags = ntohl(nla_get_be32(ctx->nla[NFTA_TABLE_FLAGS]));
if (flags & ~NFT_TABLE_F_MASK)
return -EOPNOTSUPP;
if (flags == ctx->table->flags)
return 0;
if ((nft_table_has_owner(ctx->table) &&
!(flags & NFT_TABLE_F_OWNER)) ||
(!nft_table_has_owner(ctx->table) &&
flags & NFT_TABLE_F_OWNER))
return -EOPNOTSUPP;
/* No dormant off/on/off/on games in single transaction */
if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
return -EINVAL;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
sizeof(struct nft_trans_table));
if (trans == NULL)
return -ENOMEM;
if ((flags & NFT_TABLE_F_DORMANT) &&
!(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
ctx->table->flags |= NFT_TABLE_F_DORMANT;
if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE))
ctx->table->flags |= __NFT_TABLE_F_WAS_AWAKEN;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
ctx->table->flags & NFT_TABLE_F_DORMANT) {
ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE)) {
ret = nf_tables_table_enable(ctx->net, ctx->table);
if (ret < 0)
goto err_register_hooks;
ctx->table->flags |= __NFT_TABLE_F_WAS_DORMANT;
}
}
nft_trans_table_update(trans) = true;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err_register_hooks:
nft_trans_destroy(trans);
return ret;
}
static u32 nft_chain_hash(const void *data, u32 len, u32 seed)
{
const char *name = data;
return jhash(name, strlen(name), seed);
}
static u32 nft_chain_hash_obj(const void *data, u32 len, u32 seed)
{
const struct nft_chain *chain = data;
return nft_chain_hash(chain->name, 0, seed);
}
static int nft_chain_hash_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct nft_chain *chain = ptr;
const char *name = arg->key;
return strcmp(chain->name, name);
}
static u32 nft_objname_hash(const void *data, u32 len, u32 seed)
{
const struct nft_object_hash_key *k = data;
seed ^= hash_ptr(k->table, 32);
return jhash(k->name, strlen(k->name), seed);
}
static u32 nft_objname_hash_obj(const void *data, u32 len, u32 seed)
{
const struct nft_object *obj = data;
return nft_objname_hash(&obj->key, 0, seed);
}
static int nft_objname_hash_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct nft_object_hash_key *k = arg->key;
const struct nft_object *obj = ptr;
if (obj->key.table != k->table)
return -1;
return strcmp(obj->key.name, k->name);
}
static bool nft_supported_family(u8 family)
{
return false
#ifdef CONFIG_NF_TABLES_INET
|| family == NFPROTO_INET
#endif
#ifdef CONFIG_NF_TABLES_IPV4
|| family == NFPROTO_IPV4
#endif
#ifdef CONFIG_NF_TABLES_ARP
|| family == NFPROTO_ARP
#endif
#ifdef CONFIG_NF_TABLES_NETDEV
|| family == NFPROTO_NETDEV
#endif
#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
|| family == NFPROTO_BRIDGE
#endif
#ifdef CONFIG_NF_TABLES_IPV6
|| family == NFPROTO_IPV6
#endif
;
}
static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct nftables_pernet *nft_net = nft_pernet(info->net);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
struct nft_ctx ctx;
u32 flags = 0;
int err;
if (!nft_supported_family(family))
return -EOPNOTSUPP;
lockdep_assert_held(&nft_net->commit_mutex);
attr = nla[NFTA_TABLE_NAME];
table = nft_table_lookup(net, attr, family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
if (PTR_ERR(table) != -ENOENT)
return PTR_ERR(table);
} else {
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
return nf_tables_updtable(&ctx);
}
if (nla[NFTA_TABLE_FLAGS]) {
flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
if (flags & ~NFT_TABLE_F_MASK)
return -EOPNOTSUPP;
}
err = -ENOMEM;
table = kzalloc(sizeof(*table), GFP_KERNEL_ACCOUNT);
if (table == NULL)
goto err_kzalloc;
table->validate_state = nft_net->validate_state;
table->name = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
if (table->name == NULL)
goto err_strdup;
if (nla[NFTA_TABLE_USERDATA]) {
table->udata = nla_memdup(nla[NFTA_TABLE_USERDATA], GFP_KERNEL_ACCOUNT);
if (table->udata == NULL)
goto err_table_udata;
table->udlen = nla_len(nla[NFTA_TABLE_USERDATA]);
}
err = rhltable_init(&table->chains_ht, &nft_chain_ht_params);
if (err)
goto err_chain_ht;
INIT_LIST_HEAD(&table->chains);
INIT_LIST_HEAD(&table->sets);
INIT_LIST_HEAD(&table->objects);
INIT_LIST_HEAD(&table->flowtables);
table->family = family;
table->flags = flags;
table->handle = ++nft_net->table_handle;
if (table->flags & NFT_TABLE_F_OWNER)
table->nlpid = NETLINK_CB(skb).portid;
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
if (err < 0)
goto err_trans;
list_add_tail_rcu(&table->list, &nft_net->tables);
return 0;
err_trans:
rhltable_destroy(&table->chains_ht);
err_chain_ht:
kfree(table->udata);
err_table_udata:
kfree(table->name);
err_strdup:
kfree(table);
err_kzalloc:
return err;
}
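
/*
 * Flush a table: delete the rules first, then sets, flowtables and objects,
 * and finally the chains and the table itself. Bound chains and anonymous
 * sets are skipped, they are torn down with the rules that own them.
 */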
static int nft_flush_table(struct nft_ctx *ctx)
{
struct nft_flowtable *flowtable, *nft;
struct nft_chain *chain, *nc;
struct nft_object *obj, *ne;
struct nft_set *set, *ns;
int err;
list_for_each_entry(chain, &ctx->table->chains, list) {
if (!nft_is_active_next(ctx->net, chain))
continue;
if (nft_chain_binding(chain))
continue;
ctx->chain = chain;
err = nft_delrule_by_chain(ctx);
if (err < 0)
goto out;
}
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
if (!nft_is_active_next(ctx->net, set))
continue;
if (nft_set_is_anonymous(set))
continue;
err = nft_delset(ctx, set);
if (err < 0)
goto out;
}
list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) {
if (!nft_is_active_next(ctx->net, flowtable))
continue;
err = nft_delflowtable(ctx, flowtable);
if (err < 0)
goto out;
}
list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) {
if (!nft_is_active_next(ctx->net, obj))
continue;
err = nft_delobj(ctx, obj);
if (err < 0)
goto out;
}
list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
if (!nft_is_active_next(ctx->net, chain))
continue;
if (nft_chain_binding(chain))
continue;
ctx->chain = chain;
err = nft_delchain(ctx);
if (err < 0)
goto out;
}
err = nft_deltable(ctx);
out:
return err;
}
static int nft_flush(struct nft_ctx *ctx, int family)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table, *nt;
int err = 0;
list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
if (family != AF_UNSPEC && table->family != family)
continue;
ctx->family = table->family;
if (!nft_is_active_next(ctx->net, table))
continue;
if (nft_table_has_owner(table) && table->nlpid != ctx->portid)
continue;
if (nla[NFTA_TABLE_NAME] &&
nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
continue;
ctx->table = table;
err = nft_flush_table(ctx);
if (err < 0)
goto out;
}
out:
return err;
}
static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
struct nft_ctx ctx;
nft_ctx_init(&ctx, net, skb, info->nlh, 0, NULL, NULL, nla);
if (family == AF_UNSPEC ||
(!nla[NFTA_TABLE_NAME] && !nla[NFTA_TABLE_HANDLE]))
return nft_flush(&ctx, family);
if (nla[NFTA_TABLE_HANDLE]) {
attr = nla[NFTA_TABLE_HANDLE];
table = nft_table_lookup_byhandle(net, attr, genmask,
NETLINK_CB(skb).portid);
} else {
attr = nla[NFTA_TABLE_NAME];
table = nft_table_lookup(net, attr, family, genmask,
NETLINK_CB(skb).portid);
}
if (IS_ERR(table)) {
if (PTR_ERR(table) == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYTABLE)
return 0;
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(table);
}
if (info->nlh->nlmsg_flags & NLM_F_NONREC &&
table->use > 0)
return -EBUSY;
ctx.family = family;
ctx.table = table;
return nft_flush_table(&ctx);
}
static void nf_tables_table_destroy(struct nft_ctx *ctx)
{
if (WARN_ON(ctx->table->use > 0))
return;
rhltable_destroy(&ctx->table->chains_ht);
kfree(ctx->table->name);
kfree(ctx->table->udata);
kfree(ctx->table);
}
void nft_register_chain_type(const struct nft_chain_type *ctype)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return;
}
chain_type[ctype->family][ctype->type] = ctype;
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_register_chain_type);
void nft_unregister_chain_type(const struct nft_chain_type *ctype)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
chain_type[ctype->family][ctype->type] = NULL;
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
/*
* Chains
*/
static struct nft_chain *
nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask)
{
struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list) {
if (chain->handle == handle &&
nft_active_genmask(chain, genmask))
return chain;
}
return ERR_PTR(-ENOENT);
}
static bool lockdep_commit_lock_is_held(const struct net *net)
{
#ifdef CONFIG_PROVE_LOCKING
struct nftables_pernet *nft_net = nft_pernet(net);
return lockdep_is_held(&nft_net->commit_mutex);
#else
return true;
#endif
}
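
/*
 * Look up a chain by name via the table's rhltable; the caller must hold
 * either the RCU read lock or the commit mutex.
 */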
static struct nft_chain *nft_chain_lookup(struct net *net,
struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
char search[NFT_CHAIN_MAXNAMELEN + 1];
struct rhlist_head *tmp, *list;
struct nft_chain *chain;
if (nla == NULL)
return ERR_PTR(-EINVAL);
nla_strscpy(search, nla, sizeof(search));
WARN_ON(!rcu_read_lock_held() &&
!lockdep_commit_lock_is_held(net));
chain = ERR_PTR(-ENOENT);
rcu_read_lock();
list = rhltable_lookup(&table->chains_ht, search, nft_chain_ht_params);
if (!list)
goto out_unlock;
rhl_for_each_entry_rcu(chain, tmp, list, rhlhead) {
if (nft_active_genmask(chain, genmask))
goto out_unlock;
}
chain = ERR_PTR(-ENOENT);
out_unlock:
rcu_read_unlock();
return chain;
}
static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
[NFTA_CHAIN_TABLE] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_CHAIN_HANDLE] = { .type = NLA_U64 },
[NFTA_CHAIN_NAME] = { .type = NLA_STRING,
.len = NFT_CHAIN_MAXNAMELEN - 1 },
[NFTA_CHAIN_HOOK] = { .type = NLA_NESTED },
[NFTA_CHAIN_POLICY] = { .type = NLA_U32 },
[NFTA_CHAIN_TYPE] = { .type = NLA_STRING,
.len = NFT_MODULE_AUTOLOAD_LIMIT },
[NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED },
[NFTA_CHAIN_FLAGS] = { .type = NLA_U32 },
[NFTA_CHAIN_ID] = { .type = NLA_U32 },
[NFTA_CHAIN_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
};
static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
[NFTA_HOOK_HOOKNUM] = { .type = NLA_U32 },
[NFTA_HOOK_PRIORITY] = { .type = NLA_U32 },
[NFTA_HOOK_DEV] = { .type = NLA_STRING,
.len = IFNAMSIZ - 1 },
};
static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
{
struct nft_stats *cpu_stats, total;
struct nlattr *nest;
unsigned int seq;
u64 pkts, bytes;
int cpu;
if (!stats)
return 0;
memset(&total, 0, sizeof(total));
for_each_possible_cpu(cpu) {
cpu_stats = per_cpu_ptr(stats, cpu);
do {
seq = u64_stats_fetch_begin(&cpu_stats->syncp);
pkts = cpu_stats->pkts;
bytes = cpu_stats->bytes;
} while (u64_stats_fetch_retry(&cpu_stats->syncp, seq));
total.pkts += pkts;
total.bytes += bytes;
}
nest = nla_nest_start_noflag(skb, NFTA_CHAIN_COUNTERS);
if (nest == NULL)
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts),
NFTA_COUNTER_PAD) ||
nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
NFTA_COUNTER_PAD))
goto nla_put_failure;
nla_nest_end(skb, nest);
return 0;
nla_put_failure:
return -ENOSPC;
}
static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
const struct nft_base_chain *basechain,
const struct list_head *hook_list)
{
const struct nf_hook_ops *ops = &basechain->ops;
struct nft_hook *hook, *first = NULL;
struct nlattr *nest, *nest_devs;
int n = 0;
nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK);
if (nest == NULL)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
goto nla_put_failure;
if (nft_base_chain_netdev(family, ops->hooknum)) {
nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS);
if (!nest_devs)
goto nla_put_failure;
if (!hook_list)
hook_list = &basechain->hook_list;
list_for_each_entry(hook, hook_list, list) {
if (!first)
first = hook;
if (nla_put_string(skb, NFTA_DEVICE_NAME,
hook->ops.dev->name))
goto nla_put_failure;
n++;
}
nla_nest_end(skb, nest_devs);
if (n == 1 &&
nla_put_string(skb, NFTA_HOOK_DEV, first->ops.dev->name))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
return 0;
nla_put_failure:
return -1;
}
static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, int event, u32 flags,
int family, const struct nft_table *table,
const struct nft_chain *chain,
const struct list_head *hook_list)
{
struct nlmsghdr *nlh;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
NFNETLINK_V0, nft_base_seq(net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name) ||
nla_put_string(skb, NFTA_CHAIN_NAME, chain->name) ||
nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle),
NFTA_CHAIN_PAD))
goto nla_put_failure;
if (event == NFT_MSG_DELCHAIN && !hook_list) {
nlmsg_end(skb, nlh);
return 0;
}
if (nft_is_base_chain(chain)) {
const struct nft_base_chain *basechain = nft_base_chain(chain);
struct nft_stats __percpu *stats;
if (nft_dump_basechain_hook(skb, family, basechain, hook_list))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
htonl(basechain->policy)))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
goto nla_put_failure;
stats = rcu_dereference_check(basechain->stats,
lockdep_commit_lock_is_held(net));
if (nft_dump_stats(skb, stats))
goto nla_put_failure;
}
if (chain->flags &&
nla_put_be32(skb, NFTA_CHAIN_FLAGS, htonl(chain->flags)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
goto nla_put_failure;
if (chain->udata &&
nla_put(skb, NFTA_CHAIN_USERDATA, chain->udlen, chain->udata))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
return -1;
}
static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event,
const struct list_head *hook_list)
{
struct nftables_pernet *nft_net;
struct sk_buff *skb;
u16 flags = 0;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
event, flags, ctx->family, ctx->table,
ctx->chain, hook_list);
if (err < 0) {
kfree_skb(skb);
goto err;
}
nft_net = nft_pernet(ctx->net);
nft_notify_enqueue(skb, ctx->report, &nft_net->notify_list);
return;
err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static int nf_tables_dump_chains(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
const struct nft_table *table;
const struct nft_chain *chain;
rcu_read_lock();
nft_net = nft_pernet(net);
cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
list_for_each_entry_rcu(chain, &table->chains, list) {
if (idx < s_idx)
goto cont;
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
if (!nft_is_active(net, chain))
continue;
if (nf_tables_fill_chain_info(skb, net,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWCHAIN,
NLM_F_MULTI,
table->family, table,
chain, NULL) < 0)
goto done;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
done:
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
/* called with rcu_read_lock held */
static int nf_tables_getchain(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
const struct nft_chain *chain;
struct net *net = info->net;
struct nft_table *table;
struct sk_buff *skb2;
int err;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = nf_tables_dump_chains,
.module = THIS_MODULE,
};
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TABLE]);
return PTR_ERR(table);
}
chain = nft_chain_lookup(net, table, nla[NFTA_CHAIN_NAME], genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
return PTR_ERR(chain);
}
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
return -ENOMEM;
err = nf_tables_fill_chain_info(skb2, net, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq, NFT_MSG_NEWCHAIN,
0, family, table, chain, NULL);
if (err < 0)
goto err_fill_chain_info;
return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
err_fill_chain_info:
kfree_skb(skb2);
return err;
}
static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
[NFTA_COUNTER_PACKETS] = { .type = NLA_U64 },
[NFTA_COUNTER_BYTES] = { .type = NLA_U64 },
};
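
/*
 * Allocate per-cpu chain counters and seed the current cpu with the packet
 * and byte values supplied in NFTA_CHAIN_COUNTERS.
 */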
static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
{
struct nlattr *tb[NFTA_COUNTER_MAX+1];
struct nft_stats __percpu *newstats;
struct nft_stats *stats;
int err;
err = nla_parse_nested_deprecated(tb, NFTA_COUNTER_MAX, attr,
nft_counter_policy, NULL);
if (err < 0)
return ERR_PTR(err);
if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
return ERR_PTR(-EINVAL);
newstats = netdev_alloc_pcpu_stats(struct nft_stats);
if (newstats == NULL)
return ERR_PTR(-ENOMEM);
/* Restore old counters on this cpu, no problem. Per-cpu statistics
* are not exposed to userspace.
*/
preempt_disable();
stats = this_cpu_ptr(newstats);
stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
preempt_enable();
return newstats;
}
static void nft_chain_stats_replace(struct nft_trans *trans)
{
struct nft_base_chain *chain = nft_base_chain(trans->ctx.chain);
if (!nft_trans_chain_stats(trans))
return;
nft_trans_chain_stats(trans) =
rcu_replace_pointer(chain->stats, nft_trans_chain_stats(trans),
lockdep_commit_lock_is_held(trans->ctx.net));
if (!nft_trans_chain_stats(trans))
static_branch_inc(&nft_counters_enabled);
}
static void nf_tables_chain_free_chain_rules(struct nft_chain *chain)
{
struct nft_rule_blob *g0 = rcu_dereference_raw(chain->blob_gen_0);
struct nft_rule_blob *g1 = rcu_dereference_raw(chain->blob_gen_1);
if (g0 != g1)
kvfree(g1);
kvfree(g0);
/* should be NULL either via abort or via successful commit */
WARN_ON_ONCE(chain->blob_next);
kvfree(chain->blob_next);
}
void nf_tables_chain_destroy(struct nft_ctx *ctx)
{
struct nft_chain *chain = ctx->chain;
struct nft_hook *hook, *next;
if (WARN_ON(chain->use > 0))
return;
/* no concurrent access possible anymore */
nf_tables_chain_free_chain_rules(chain);
if (nft_is_base_chain(chain)) {
struct nft_base_chain *basechain = nft_base_chain(chain);
if (nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) {
list_for_each_entry_safe(hook, next,
&basechain->hook_list, list) {
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
}
module_put(basechain->type->owner);
if (rcu_access_pointer(basechain->stats)) {
static_branch_dec(&nft_counters_enabled);
free_percpu(rcu_dereference_raw(basechain->stats));
}
kfree(chain->name);
kfree(chain->udata);
kfree(basechain);
} else {
kfree(chain->name);
kfree(chain->udata);
kfree(chain);
}
}
static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
const struct nlattr *attr)
{
struct net_device *dev;
char ifname[IFNAMSIZ];
struct nft_hook *hook;
int err;
hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
if (!hook) {
err = -ENOMEM;
goto err_hook_alloc;
}
nla_strscpy(ifname, attr, IFNAMSIZ);
/* nf_tables_netdev_event() is called under rtnl_mutex, this is
* indirectly serializing all the other holders of the commit_mutex with
* the rtnl_mutex.
*/
dev = __dev_get_by_name(net, ifname);
if (!dev) {
err = -ENOENT;
goto err_hook_dev;
}
hook->ops.dev = dev;
return hook;
err_hook_dev:
kfree(hook);
err_hook_alloc:
return ERR_PTR(err);
}
static struct nft_hook *nft_hook_list_find(struct list_head *hook_list,
const struct nft_hook *this)
{
struct nft_hook *hook;
list_for_each_entry(hook, hook_list, list) {
if (this->ops.dev == hook->ops.dev)
return hook;
}
return NULL;
}
static int nf_tables_parse_netdev_hooks(struct net *net,
const struct nlattr *attr,
struct list_head *hook_list,
struct netlink_ext_ack *extack)
{
struct nft_hook *hook, *next;
const struct nlattr *tmp;
int rem, n = 0, err;
nla_for_each_nested(tmp, attr, rem) {
if (nla_type(tmp) != NFTA_DEVICE_NAME) {
err = -EINVAL;
goto err_hook;
}
hook = nft_netdev_hook_alloc(net, tmp);
if (IS_ERR(hook)) {
NL_SET_BAD_ATTR(extack, tmp);
err = PTR_ERR(hook);
goto err_hook;
}
if (nft_hook_list_find(hook_list, hook)) {
NL_SET_BAD_ATTR(extack, tmp);
kfree(hook);
err = -EEXIST;
goto err_hook;
}
list_add_tail(&hook->list, hook_list);
n++;
if (n == NFT_NETDEVICE_MAX) {
err = -EFBIG;
goto err_hook;
}
}
return 0;
err_hook:
list_for_each_entry_safe(hook, next, hook_list, list) {
list_del(&hook->list);
kfree(hook);
}
return err;
}
struct nft_chain_hook {
u32 num;
s32 priority;
const struct nft_chain_type *type;
struct list_head list;
};
static int nft_chain_parse_netdev(struct net *net, struct nlattr *tb[],
struct list_head *hook_list,
struct netlink_ext_ack *extack, u32 flags)
{
struct nft_hook *hook;
int err;
if (tb[NFTA_HOOK_DEV]) {
hook = nft_netdev_hook_alloc(net, tb[NFTA_HOOK_DEV]);
if (IS_ERR(hook)) {
NL_SET_BAD_ATTR(extack, tb[NFTA_HOOK_DEV]);
return PTR_ERR(hook);
}
list_add_tail(&hook->list, hook_list);
} else if (tb[NFTA_HOOK_DEVS]) {
err = nf_tables_parse_netdev_hooks(net, tb[NFTA_HOOK_DEVS],
hook_list, extack);
if (err < 0)
return err;
}
if (flags & NFT_CHAIN_HW_OFFLOAD &&
list_empty(hook_list))
return -EINVAL;
return 0;
}
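
/*
 * Parse NFTA_CHAIN_HOOK. For a new base chain this resolves the hook number,
 * priority and chain type (with module autoload); for an update it only
 * checks that the attributes match the existing base chain. A reference on
 * the chain type module is taken and must be released via
 * nft_chain_release_hook().
 */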
static int nft_chain_parse_hook(struct net *net,
struct nft_base_chain *basechain,
const struct nlattr * const nla[],
struct nft_chain_hook *hook, u8 family,
u32 flags, struct netlink_ext_ack *extack)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nlattr *ha[NFTA_HOOK_MAX + 1];
const struct nft_chain_type *type;
int err;
lockdep_assert_held(&nft_net->commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
err = nla_parse_nested_deprecated(ha, NFTA_HOOK_MAX,
nla[NFTA_CHAIN_HOOK],
nft_hook_policy, NULL);
if (err < 0)
return err;
if (!basechain) {
if (!ha[NFTA_HOOK_HOOKNUM] ||
!ha[NFTA_HOOK_PRIORITY]) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
return -ENOENT;
}
hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
if (!type)
return -EOPNOTSUPP;
if (nla[NFTA_CHAIN_TYPE]) {
type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
family, true);
if (IS_ERR(type)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
return PTR_ERR(type);
}
}
if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
return -EOPNOTSUPP;
if (type->type == NFT_CHAIN_T_NAT &&
hook->priority <= NF_IP_PRI_CONNTRACK)
return -EOPNOTSUPP;
} else {
if (ha[NFTA_HOOK_HOOKNUM]) {
hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
if (hook->num != basechain->ops.hooknum)
return -EOPNOTSUPP;
}
if (ha[NFTA_HOOK_PRIORITY]) {
hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
if (hook->priority != basechain->ops.priority)
return -EOPNOTSUPP;
}
type = basechain->type;
}
if (!try_module_get(type->owner)) {
if (nla[NFTA_CHAIN_TYPE])
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
return -ENOENT;
}
hook->type = type;
INIT_LIST_HEAD(&hook->list);
if (nft_base_chain_netdev(family, hook->num)) {
err = nft_chain_parse_netdev(net, ha, &hook->list, extack, flags);
if (err < 0) {
module_put(type->owner);
return err;
}
} else if (ha[NFTA_HOOK_DEV] || ha[NFTA_HOOK_DEVS]) {
module_put(type->owner);
return -EOPNOTSUPP;
}
return 0;
}
static void nft_chain_release_hook(struct nft_chain_hook *hook)
{
struct nft_hook *h, *next;
list_for_each_entry_safe(h, next, &hook->list, list) {
list_del(&h->list);
kfree(h);
}
module_put(hook->type->owner);
}
static void nft_last_rule(const struct nft_chain *chain, const void *ptr)
{
struct nft_rule_dp_last *lrule;
BUILD_BUG_ON(offsetof(struct nft_rule_dp_last, end) != 0);
lrule = (struct nft_rule_dp_last *)ptr;
lrule->end.is_last = 1;
lrule->chain = chain;
/* blob size does not include the trailer rule */
}
static struct nft_rule_blob *nf_tables_chain_alloc_rules(const struct nft_chain *chain,
unsigned int size)
{
struct nft_rule_blob *blob;
if (size > INT_MAX)
return NULL;
size += sizeof(struct nft_rule_blob) + sizeof(struct nft_rule_dp_last);
blob = kvmalloc(size, GFP_KERNEL_ACCOUNT);
if (!blob)
return NULL;
blob->size = 0;
nft_last_rule(chain, blob->data);
return blob;
}
static void nft_basechain_hook_init(struct nf_hook_ops *ops, u8 family,
const struct nft_chain_hook *hook,
struct nft_chain *chain)
{
ops->pf = family;
ops->hooknum = hook->num;
ops->priority = hook->priority;
ops->priv = chain;
ops->hook = hook->type->hooks[ops->hooknum];
ops->hook_ops_type = NF_HOOK_OP_NF_TABLES;
}
static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
struct nft_chain_hook *hook, u32 flags)
{
struct nft_chain *chain;
struct nft_hook *h;
basechain->type = hook->type;
INIT_LIST_HEAD(&basechain->hook_list);
chain = &basechain->chain;
if (nft_base_chain_netdev(family, hook->num)) {
list_splice_init(&hook->list, &basechain->hook_list);
list_for_each_entry(h, &basechain->hook_list, list)
nft_basechain_hook_init(&h->ops, family, hook, chain);
}
nft_basechain_hook_init(&basechain->ops, family, hook, chain);
chain->flags |= NFT_CHAIN_BASE | flags;
basechain->policy = NF_ACCEPT;
if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
!nft_chain_offload_support(basechain)) {
list_splice_init(&basechain->hook_list, &hook->list);
return -EOPNOTSUPP;
}
flow_block_init(&basechain->flow_block);
return 0;
}
int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
{
int err;
err = rhltable_insert_key(&table->chains_ht, chain->name,
&chain->rhlhead, nft_chain_ht_params);
if (err)
return err;
list_add_tail_rcu(&chain->list, &table->chains);
return 0;
}
static u64 chain_id;
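
/*
 * Create a new chain. If NFTA_CHAIN_HOOK is present, a base chain is set up
 * and its netfilter hooks are registered; otherwise a plain chain is
 * created. Binding chains without a name get an auto-generated "__chainN"
 * name.
 */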
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
u8 policy, u32 flags,
struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
struct nft_base_chain *basechain;
struct net *net = ctx->net;
char name[NFT_NAME_MAXLEN];
struct nft_rule_blob *blob;
struct nft_trans *trans;
struct nft_chain *chain;
int err;
if (nla[NFTA_CHAIN_HOOK]) {
struct nft_stats __percpu *stats = NULL;
struct nft_chain_hook hook = {};
if (flags & NFT_CHAIN_BINDING)
return -EOPNOTSUPP;
err = nft_chain_parse_hook(net, NULL, nla, &hook, family, flags,
extack);
if (err < 0)
return err;
basechain = kzalloc(sizeof(*basechain), GFP_KERNEL_ACCOUNT);
if (basechain == NULL) {
nft_chain_release_hook(&hook);
return -ENOMEM;
}
chain = &basechain->chain;
if (nla[NFTA_CHAIN_COUNTERS]) {
stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
if (IS_ERR(stats)) {
nft_chain_release_hook(&hook);
kfree(basechain);
return PTR_ERR(stats);
}
rcu_assign_pointer(basechain->stats, stats);
}
err = nft_basechain_init(basechain, family, &hook, flags);
if (err < 0) {
nft_chain_release_hook(&hook);
kfree(basechain);
free_percpu(stats);
return err;
}
if (stats)
static_branch_inc(&nft_counters_enabled);
} else {
if (flags & NFT_CHAIN_BASE)
return -EINVAL;
if (flags & NFT_CHAIN_HW_OFFLOAD)
return -EOPNOTSUPP;
chain = kzalloc(sizeof(*chain), GFP_KERNEL_ACCOUNT);
if (chain == NULL)
return -ENOMEM;
chain->flags = flags;
}
ctx->chain = chain;
INIT_LIST_HEAD(&chain->rules);
chain->handle = nf_tables_alloc_handle(table);
chain->table = table;
if (nla[NFTA_CHAIN_NAME]) {
chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL_ACCOUNT);
} else {
if (!(flags & NFT_CHAIN_BINDING)) {
err = -EINVAL;
goto err_destroy_chain;
}
snprintf(name, sizeof(name), "__chain%llu", ++chain_id);
chain->name = kstrdup(name, GFP_KERNEL_ACCOUNT);
}
if (!chain->name) {
err = -ENOMEM;
goto err_destroy_chain;
}
if (nla[NFTA_CHAIN_USERDATA]) {
chain->udata = nla_memdup(nla[NFTA_CHAIN_USERDATA], GFP_KERNEL_ACCOUNT);
if (chain->udata == NULL) {
err = -ENOMEM;
goto err_destroy_chain;
}
chain->udlen = nla_len(nla[NFTA_CHAIN_USERDATA]);
}
blob = nf_tables_chain_alloc_rules(chain, 0);
if (!blob) {
err = -ENOMEM;
goto err_destroy_chain;
}
RCU_INIT_POINTER(chain->blob_gen_0, blob);
RCU_INIT_POINTER(chain->blob_gen_1, blob);
err = nf_tables_register_hook(net, table, chain);
if (err < 0)
goto err_destroy_chain;
if (!nft_use_inc(&table->use)) {
err = -EMFILE;
goto err_use;
}
trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto err_unregister_hook;
}
nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET;
if (nft_is_base_chain(chain))
nft_trans_chain_policy(trans) = policy;
err = nft_chain_add(table, chain);
if (err < 0) {
nft_trans_destroy(trans);
goto err_unregister_hook;
}
return 0;
err_unregister_hook:
nft_use_dec_restore(&table->use);
err_use:
nf_tables_unregister_hook(net, table, chain);
err_destroy_chain:
nf_tables_chain_destroy(ctx);
return err;
}
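
/*
 * Update an existing chain: add further netdev hooks, replace the counters,
 * change the policy or rename the chain. All changes are staged on a
 * NEWCHAIN update transaction and applied at commit time.
 */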
static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
u32 flags, const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_base_chain *basechain = NULL;
struct nft_table *table = ctx->table;
struct nft_chain *chain = ctx->chain;
struct nft_chain_hook hook = {};
struct nft_stats *stats = NULL;
struct nft_hook *h, *next;
struct nf_hook_ops *ops;
struct nft_trans *trans;
bool unregister = false;
int err;
if (chain->flags ^ flags)
return -EOPNOTSUPP;
INIT_LIST_HEAD(&hook.list);
if (nla[NFTA_CHAIN_HOOK]) {
if (!nft_is_base_chain(chain)) {
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
basechain = nft_base_chain(chain);
err = nft_chain_parse_hook(ctx->net, basechain, nla, &hook,
ctx->family, flags, extack);
if (err < 0)
return err;
if (basechain->type != hook.type) {
nft_chain_release_hook(&hook);
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
if (nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) {
list_for_each_entry_safe(h, next, &hook.list, list) {
h->ops.pf = basechain->ops.pf;
h->ops.hooknum = basechain->ops.hooknum;
h->ops.priority = basechain->ops.priority;
h->ops.priv = basechain->ops.priv;
h->ops.hook = basechain->ops.hook;
if (nft_hook_list_find(&basechain->hook_list, h)) {
list_del(&h->list);
kfree(h);
}
}
} else {
ops = &basechain->ops;
if (ops->hooknum != hook.num ||
ops->priority != hook.priority) {
nft_chain_release_hook(&hook);
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
}
}
if (nla[NFTA_CHAIN_HANDLE] &&
nla[NFTA_CHAIN_NAME]) {
struct nft_chain *chain2;
chain2 = nft_chain_lookup(ctx->net, table,
nla[NFTA_CHAIN_NAME], genmask);
if (!IS_ERR(chain2)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
err = -EEXIST;
goto err_hooks;
}
}
if (nla[NFTA_CHAIN_COUNTERS]) {
if (!nft_is_base_chain(chain)) {
err = -EOPNOTSUPP;
goto err_hooks;
}
stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
if (IS_ERR(stats)) {
err = PTR_ERR(stats);
goto err_hooks;
}
}
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
nft_is_base_chain(chain) &&
!list_empty(&hook.list)) {
basechain = nft_base_chain(chain);
ops = &basechain->ops;
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) {
err = nft_netdev_register_hooks(ctx->net, &hook.list);
if (err < 0)
goto err_hooks;
}
}
unregister = true;
err = -ENOMEM;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
sizeof(struct nft_trans_chain));
if (trans == NULL)
goto err_trans;
nft_trans_chain_stats(trans) = stats;
nft_trans_chain_update(trans) = true;
if (nla[NFTA_CHAIN_POLICY])
nft_trans_chain_policy(trans) = policy;
else
nft_trans_chain_policy(trans) = -1;
if (nla[NFTA_CHAIN_HANDLE] &&
nla[NFTA_CHAIN_NAME]) {
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
struct nft_trans *tmp;
char *name;
err = -ENOMEM;
name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL_ACCOUNT);
if (!name)
goto err_trans;
err = -EEXIST;
list_for_each_entry(tmp, &nft_net->commit_list, list) {
if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
tmp->ctx.table == table &&
nft_trans_chain_update(tmp) &&
nft_trans_chain_name(tmp) &&
strcmp(name, nft_trans_chain_name(tmp)) == 0) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
kfree(name);
goto err_trans;
}
}
nft_trans_chain_name(trans) = name;
}
nft_trans_basechain(trans) = basechain;
INIT_LIST_HEAD(&nft_trans_chain_hooks(trans));
list_splice(&hook.list, &nft_trans_chain_hooks(trans));
if (nla[NFTA_CHAIN_HOOK])
module_put(hook.type->owner);
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err_trans:
free_percpu(stats);
kfree(trans);
err_hooks:
if (nla[NFTA_CHAIN_HOOK]) {
list_for_each_entry_safe(h, next, &hook.list, list) {
if (unregister)
nf_unregister_net_hook(ctx->net, &h->ops);
list_del(&h->list);
kfree_rcu(h, rcu);
}
module_put(hook.type->owner);
}
return err;
}
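
/*
 * Resolve a chain ID (NFTA_CHAIN_ID) against NEWCHAIN transactions that are
 * still pending in the current batch.
 */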
static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
struct nftables_pernet *nft_net = nft_pernet(net);
u32 id = ntohl(nla_get_be32(nla));
struct nft_trans *trans;
list_for_each_entry(trans, &nft_net->commit_list, list) {
struct nft_chain *chain = trans->ctx.chain;
if (trans->msg_type == NFT_MSG_NEWCHAIN &&
chain->table == table &&
id == nft_trans_chain_id(trans) &&
nft_active_genmask(chain, genmask))
return chain;
}
return ERR_PTR(-ENOENT);
}
static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct nftables_pernet *nft_net = nft_pernet(info->net);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct nft_chain *chain = NULL;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
u8 policy = NF_ACCEPT;
struct nft_ctx ctx;
u64 handle = 0;
u32 flags = 0;
lockdep_assert_held(&nft_net->commit_mutex);
table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TABLE]);
return PTR_ERR(table);
}
chain = NULL;
attr = nla[NFTA_CHAIN_NAME];
if (nla[NFTA_CHAIN_HANDLE]) {
handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
chain = nft_chain_lookup_byhandle(table, handle, genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_HANDLE]);
return PTR_ERR(chain);
}
attr = nla[NFTA_CHAIN_HANDLE];
} else if (nla[NFTA_CHAIN_NAME]) {
chain = nft_chain_lookup(net, table, attr, genmask);
if (IS_ERR(chain)) {
if (PTR_ERR(chain) != -ENOENT) {
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(chain);
}
chain = NULL;
}
} else if (!nla[NFTA_CHAIN_ID]) {
return -EINVAL;
}
if (nla[NFTA_CHAIN_POLICY]) {
if (chain != NULL &&
!nft_is_base_chain(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_POLICY]);
return -EOPNOTSUPP;
}
if (chain == NULL &&
nla[NFTA_CHAIN_HOOK] == NULL) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_POLICY]);
return -EOPNOTSUPP;
}
policy = ntohl(nla_get_be32(nla[NFTA_CHAIN_POLICY]));
switch (policy) {
case NF_DROP:
case NF_ACCEPT:
break;
default:
return -EINVAL;
}
}
if (nla[NFTA_CHAIN_FLAGS])
flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS]));
else if (chain)
flags = chain->flags;
if (flags & ~NFT_CHAIN_FLAGS)
return -EOPNOTSUPP;
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
if (chain != NULL) {
if (chain->flags & NFT_CHAIN_BINDING)
return -EINVAL;
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
flags |= chain->flags & NFT_CHAIN_BASE;
return nf_tables_updchain(&ctx, genmask, policy, flags, attr,
extack);
}
return nf_tables_addchain(&ctx, family, genmask, policy, flags, extack);
}
static int nft_delchain_hook(struct nft_ctx *ctx,
struct nft_base_chain *basechain,
struct netlink_ext_ack *extack)
{
const struct nft_chain *chain = &basechain->chain;
const struct nlattr * const *nla = ctx->nla;
struct nft_chain_hook chain_hook = {};
struct nft_hook *this, *hook;
LIST_HEAD(chain_del_list);
struct nft_trans *trans;
int err;
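	/* Move the matching device hooks off the basechain and record them
	 * in a DELCHAIN transaction; the chain itself stays in place.
	 */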
err = nft_chain_parse_hook(ctx->net, basechain, nla, &chain_hook,
ctx->family, chain->flags, extack);
if (err < 0)
return err;
list_for_each_entry(this, &chain_hook.list, list) {
hook = nft_hook_list_find(&basechain->hook_list, this);
if (!hook) {
err = -ENOENT;
goto err_chain_del_hook;
}
list_move(&hook->list, &chain_del_list);
}
trans = nft_trans_alloc(ctx, NFT_MSG_DELCHAIN,
sizeof(struct nft_trans_chain));
if (!trans) {
err = -ENOMEM;
goto err_chain_del_hook;
}
nft_trans_basechain(trans) = basechain;
nft_trans_chain_update(trans) = true;
INIT_LIST_HEAD(&nft_trans_chain_hooks(trans));
list_splice(&chain_del_list, &nft_trans_chain_hooks(trans));
nft_chain_release_hook(&chain_hook);
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err_chain_del_hook:
list_splice(&chain_del_list, &basechain->hook_list);
nft_chain_release_hook(&chain_hook);
return err;
}
static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
struct nft_chain *chain;
struct nft_rule *rule;
struct nft_ctx ctx;
u64 handle;
u32 use;
int err;
table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TABLE]);
return PTR_ERR(table);
}
if (nla[NFTA_CHAIN_HANDLE]) {
attr = nla[NFTA_CHAIN_HANDLE];
handle = be64_to_cpu(nla_get_be64(attr));
chain = nft_chain_lookup_byhandle(table, handle, genmask);
} else {
attr = nla[NFTA_CHAIN_NAME];
chain = nft_chain_lookup(net, table, attr, genmask);
}
if (IS_ERR(chain)) {
if (PTR_ERR(chain) == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYCHAIN)
return 0;
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(chain);
}
if (nft_chain_binding(chain))
return -EOPNOTSUPP;
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
if (nla[NFTA_CHAIN_HOOK]) {
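		/* For netdev basechains, NFTA_CHAIN_HOOK detaches only the
		 * listed device hooks instead of deleting the whole chain.
		 */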
if (chain->flags & NFT_CHAIN_HW_OFFLOAD)
return -EOPNOTSUPP;
if (nft_is_base_chain(chain)) {
struct nft_base_chain *basechain = nft_base_chain(chain);
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
return nft_delchain_hook(&ctx, basechain, extack);
}
}
if (info->nlh->nlmsg_flags & NLM_F_NONREC &&
chain->use > 0)
return -EBUSY;
use = chain->use;
list_for_each_entry(rule, &chain->rules, list) {
if (!nft_is_active_next(net, rule))
continue;
use--;
err = nft_delrule(&ctx, rule);
if (err < 0)
return err;
}
	/* There are rules and elements that are still holding references to us;
* we cannot do a recursive removal in this case.
*/
if (use > 0) {
NL_SET_BAD_ATTR(extack, attr);
return -EBUSY;
}
return nft_delchain(&ctx);
}
/*
* Expressions
*/
/**
* nft_register_expr - register nf_tables expr type
* @type: expr type
*
* Registers the expr type for use with nf_tables. Returns zero on
* success or a negative errno code otherwise.
*/
int nft_register_expr(struct nft_expr_type *type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
if (type->family == NFPROTO_UNSPEC)
list_add_tail_rcu(&type->list, &nf_tables_expressions);
else
list_add_rcu(&type->list, &nf_tables_expressions);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
EXPORT_SYMBOL_GPL(nft_register_expr);
/**
* nft_unregister_expr - unregister nf_tables expr type
* @type: expr type
*
 * Unregisters the expr type for use with nf_tables.
*/
void nft_unregister_expr(struct nft_expr_type *type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
list_del_rcu(&type->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_expr);
static const struct nft_expr_type *__nft_expr_type_get(u8 family,
struct nlattr *nla)
{
const struct nft_expr_type *type, *candidate = NULL;
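	/* Prefer a family-specific expression type; fall back to the first
	 * matching NFPROTO_UNSPEC type.
	 */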
list_for_each_entry(type, &nf_tables_expressions, list) {
if (!nla_strcmp(nla, type->name)) {
if (!type->family && !candidate)
candidate = type;
else if (type->family == family)
candidate = type;
}
}
return candidate;
}
#ifdef CONFIG_MODULES
static int nft_expr_type_request_module(struct net *net, u8 family,
struct nlattr *nla)
{
if (nft_request_module(net, "nft-expr-%u-%.*s", family,
nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
return -EAGAIN;
return 0;
}
#endif
static const struct nft_expr_type *nft_expr_type_get(struct net *net,
u8 family,
struct nlattr *nla)
{
const struct nft_expr_type *type;
if (nla == NULL)
return ERR_PTR(-EINVAL);
type = __nft_expr_type_get(family, nla);
if (type != NULL && try_module_get(type->owner))
return type;
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
return ERR_PTR(-EAGAIN);
if (nft_request_module(net, "nft-expr-%.*s",
nla_len(nla),
(char *)nla_data(nla)) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
return ERR_PTR(-ENOENT);
}
static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
[NFTA_EXPR_NAME] = { .type = NLA_STRING,
.len = NFT_MODULE_AUTOLOAD_LIMIT },
[NFTA_EXPR_DATA] = { .type = NLA_NESTED },
};
static int nf_tables_fill_expr_info(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
if (nla_put_string(skb, NFTA_EXPR_NAME, expr->ops->type->name))
goto nla_put_failure;
if (expr->ops->dump) {
struct nlattr *data = nla_nest_start_noflag(skb,
NFTA_EXPR_DATA);
if (data == NULL)
goto nla_put_failure;
if (expr->ops->dump(skb, expr, reset) < 0)
goto nla_put_failure;
nla_nest_end(skb, data);
}
return skb->len;
nla_put_failure:
return -1;
}
int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
const struct nft_expr *expr, bool reset)
{
struct nlattr *nest;
nest = nla_nest_start_noflag(skb, attr);
if (!nest)
goto nla_put_failure;
if (nf_tables_fill_expr_info(skb, expr, reset) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
return 0;
nla_put_failure:
return -1;
}
struct nft_expr_info {
const struct nft_expr_ops *ops;
const struct nlattr *attr;
struct nlattr *tb[NFT_EXPR_MAXATTR + 1];
};
static int nf_tables_expr_parse(const struct nft_ctx *ctx,
const struct nlattr *nla,
struct nft_expr_info *info)
{
const struct nft_expr_type *type;
const struct nft_expr_ops *ops;
struct nlattr *tb[NFTA_EXPR_MAX + 1];
int err;
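	/* Resolve the expression type by name (possibly autoloading its
	 * module) and parse the type-specific NFTA_EXPR_DATA attributes.
	 */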
err = nla_parse_nested_deprecated(tb, NFTA_EXPR_MAX, nla,
nft_expr_policy, NULL);
if (err < 0)
return err;
type = nft_expr_type_get(ctx->net, ctx->family, tb[NFTA_EXPR_NAME]);
if (IS_ERR(type))
return PTR_ERR(type);
if (tb[NFTA_EXPR_DATA]) {
err = nla_parse_nested_deprecated(info->tb, type->maxattr,
tb[NFTA_EXPR_DATA],
type->policy, NULL);
if (err < 0)
goto err1;
} else
memset(info->tb, 0, sizeof(info->tb[0]) * (type->maxattr + 1));
if (type->select_ops != NULL) {
ops = type->select_ops(ctx,
(const struct nlattr * const *)info->tb);
if (IS_ERR(ops)) {
err = PTR_ERR(ops);
#ifdef CONFIG_MODULES
if (err == -EAGAIN)
if (nft_expr_type_request_module(ctx->net,
ctx->family,
tb[NFTA_EXPR_NAME]) != -EAGAIN)
err = -ENOENT;
#endif
goto err1;
}
} else
ops = type->ops;
info->attr = nla;
info->ops = ops;
return 0;
err1:
module_put(type->owner);
return err;
}
int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
struct nft_expr_info *info)
{
struct nlattr *tb[NFTA_EXPR_MAX + 1];
const struct nft_expr_type *type;
int err;
err = nla_parse_nested_deprecated(tb, NFTA_EXPR_MAX, nla,
nft_expr_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_EXPR_DATA])
return -EINVAL;
type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
if (!type)
return -ENOENT;
if (!type->inner_ops)
return -EOPNOTSUPP;
err = nla_parse_nested_deprecated(info->tb, type->maxattr,
tb[NFTA_EXPR_DATA],
type->policy, NULL);
if (err < 0)
goto err_nla_parse;
info->attr = nla;
info->ops = type->inner_ops;
return 0;
err_nla_parse:
return err;
}
static int nf_tables_newexpr(const struct nft_ctx *ctx,
const struct nft_expr_info *expr_info,
struct nft_expr *expr)
{
const struct nft_expr_ops *ops = expr_info->ops;
int err;
expr->ops = ops;
if (ops->init) {
err = ops->init(ctx, expr, (const struct nlattr **)expr_info->tb);
if (err < 0)
goto err1;
}
return 0;
err1:
expr->ops = NULL;
return err;
}
static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
struct nft_expr *expr)
{
const struct nft_expr_type *type = expr->ops->type;
if (expr->ops->destroy)
expr->ops->destroy(ctx, expr);
module_put(type->owner);
}
static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
const struct nlattr *nla)
{
struct nft_expr_info expr_info;
struct nft_expr *expr;
struct module *owner;
int err;
err = nf_tables_expr_parse(ctx, nla, &expr_info);
if (err < 0)
goto err_expr_parse;
err = -EOPNOTSUPP;
if (!(expr_info.ops->type->flags & NFT_EXPR_STATEFUL))
goto err_expr_stateful;
err = -ENOMEM;
expr = kzalloc(expr_info.ops->size, GFP_KERNEL_ACCOUNT);
if (expr == NULL)
goto err_expr_stateful;
err = nf_tables_newexpr(ctx, &expr_info, expr);
if (err < 0)
goto err_expr_new;
return expr;
err_expr_new:
kfree(expr);
err_expr_stateful:
owner = expr_info.ops->type->owner;
if (expr_info.ops->type->release_ops)
expr_info.ops->type->release_ops(expr_info.ops);
module_put(owner);
err_expr_parse:
return ERR_PTR(err);
}
int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
{
int err;
if (src->ops->clone) {
dst->ops = src->ops;
err = src->ops->clone(dst, src);
if (err < 0)
return err;
} else {
memcpy(dst, src, src->ops->size);
}
__module_get(src->ops->type->owner);
return 0;
}
void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr)
{
nf_tables_expr_destroy(ctx, expr);
kfree(expr);
}
/*
* Rules
*/
static struct nft_rule *__nft_rule_lookup(const struct nft_chain *chain,
u64 handle)
{
struct nft_rule *rule;
	/* FIXME: linear search over the rule list, O(n) per handle lookup */
list_for_each_entry_rcu(rule, &chain->rules, list) {
if (handle == rule->handle)
return rule;
}
return ERR_PTR(-ENOENT);
}
static struct nft_rule *nft_rule_lookup(const struct nft_chain *chain,
const struct nlattr *nla)
{
if (nla == NULL)
return ERR_PTR(-EINVAL);
return __nft_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
}
static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
[NFTA_RULE_TABLE] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_RULE_CHAIN] = { .type = NLA_STRING,
.len = NFT_CHAIN_MAXNAMELEN - 1 },
[NFTA_RULE_HANDLE] = { .type = NLA_U64 },
[NFTA_RULE_EXPRESSIONS] = { .type = NLA_NESTED },
[NFTA_RULE_COMPAT] = { .type = NLA_NESTED },
[NFTA_RULE_POSITION] = { .type = NLA_U64 },
[NFTA_RULE_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
[NFTA_RULE_ID] = { .type = NLA_U32 },
[NFTA_RULE_POSITION_ID] = { .type = NLA_U32 },
[NFTA_RULE_CHAIN_ID] = { .type = NLA_U32 },
};
static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, int event,
u32 flags, int family,
const struct nft_table *table,
const struct nft_chain *chain,
const struct nft_rule *rule, u64 handle,
bool reset)
{
struct nlmsghdr *nlh;
const struct nft_expr *expr, *next;
struct nlattr *list;
u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, type, flags, family, NFNETLINK_V0,
nft_base_seq(net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle),
NFTA_RULE_PAD))
goto nla_put_failure;
if (event != NFT_MSG_DELRULE && handle) {
if (nla_put_be64(skb, NFTA_RULE_POSITION, cpu_to_be64(handle),
NFTA_RULE_PAD))
goto nla_put_failure;
}
if (chain->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_stats(chain, rule);
list = nla_nest_start_noflag(skb, NFTA_RULE_EXPRESSIONS);
if (list == NULL)
goto nla_put_failure;
nft_rule_for_each_expr(expr, next, rule) {
if (nft_expr_dump(skb, NFTA_LIST_ELEM, expr, reset) < 0)
goto nla_put_failure;
}
nla_nest_end(skb, list);
if (rule->udata) {
struct nft_userdata *udata = nft_userdata(rule);
if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1,
udata->data) < 0)
goto nla_put_failure;
}
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
return -1;
}
static void nf_tables_rule_notify(const struct nft_ctx *ctx,
const struct nft_rule *rule, int event)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
const struct nft_rule *prule;
struct sk_buff *skb;
u64 handle = 0;
u16 flags = 0;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
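	/* For NEWRULE events, report the handle of the preceding rule so
	 * listeners can tell where the new rule was inserted.
	 */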
if (event == NFT_MSG_NEWRULE &&
!list_is_first(&rule->list, &ctx->chain->rules) &&
!list_is_last(&rule->list, &ctx->chain->rules)) {
prule = list_prev_entry(rule, list);
handle = prule->handle;
}
if (ctx->flags & (NLM_F_APPEND | NLM_F_REPLACE))
flags |= NLM_F_APPEND;
if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
event, flags, ctx->family, ctx->table,
ctx->chain, rule, handle, false);
if (err < 0) {
kfree_skb(skb);
goto err;
}
nft_notify_enqueue(skb, ctx->report, &nft_net->notify_list);
return;
err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static void audit_log_rule_reset(const struct nft_table *table,
unsigned int base_seq,
unsigned int nentries)
{
char *buf = kasprintf(GFP_ATOMIC, "%s:%u",
table->name, base_seq);
audit_log_nfcfg(buf, table->family, nentries,
AUDIT_NFT_OP_RULE_RESET, GFP_ATOMIC);
kfree(buf);
}
struct nft_rule_dump_ctx {
char *table;
char *chain;
};
static int __nf_tables_dump_rules(struct sk_buff *skb,
unsigned int *idx,
struct netlink_callback *cb,
const struct nft_table *table,
const struct nft_chain *chain,
bool reset)
{
struct net *net = sock_net(skb->sk);
const struct nft_rule *rule, *prule;
unsigned int s_idx = cb->args[0];
unsigned int entries = 0;
int ret = 0;
u64 handle;
prule = NULL;
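	/* NFTA_RULE_POSITION in the dump carries the handle of the previous
	 * active rule, or zero for the first one.
	 */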
list_for_each_entry_rcu(rule, &chain->rules, list) {
if (!nft_is_active(net, rule))
goto cont_skip;
if (*idx < s_idx)
goto cont;
if (*idx > s_idx) {
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
}
if (prule)
handle = prule->handle;
else
handle = 0;
if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWRULE,
NLM_F_MULTI | NLM_F_APPEND,
table->family,
table, chain, rule, handle, reset) < 0) {
ret = 1;
break;
}
entries++;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
prule = rule;
cont_skip:
(*idx)++;
}
if (reset && entries)
audit_log_rule_reset(table, cb->seq, entries);
return ret;
}
static int nf_tables_dump_rules(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
const struct nft_rule_dump_ctx *ctx = cb->data;
struct nft_table *table;
const struct nft_chain *chain;
unsigned int idx = 0;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
bool reset = false;
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETRULE_RESET)
reset = true;
rcu_read_lock();
nft_net = nft_pernet(net);
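	/* Snapshot the generation counter so nl_dump_check_consistent() can
	 * flag messages with NLM_F_DUMP_INTR if the ruleset changes mid-dump.
	 */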
cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
continue;
if (ctx && ctx->table && ctx->chain) {
struct rhlist_head *list, *tmp;
list = rhltable_lookup(&table->chains_ht, ctx->chain,
nft_chain_ht_params);
if (!list)
goto done;
rhl_for_each_entry_rcu(chain, tmp, list, rhlhead) {
if (!nft_is_active(net, chain))
continue;
__nf_tables_dump_rules(skb, &idx,
cb, table, chain, reset);
break;
}
goto done;
}
list_for_each_entry_rcu(chain, &table->chains, list) {
if (__nf_tables_dump_rules(skb, &idx,
cb, table, chain, reset))
goto done;
}
if (ctx && ctx->table)
break;
}
done:
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
static int nf_tables_dump_rules_start(struct netlink_callback *cb)
{
const struct nlattr * const *nla = cb->data;
struct nft_rule_dump_ctx *ctx = NULL;
if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
return -ENOMEM;
if (nla[NFTA_RULE_TABLE]) {
ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
GFP_ATOMIC);
if (!ctx->table) {
kfree(ctx);
return -ENOMEM;
}
}
if (nla[NFTA_RULE_CHAIN]) {
ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
GFP_ATOMIC);
if (!ctx->chain) {
kfree(ctx->table);
kfree(ctx);
return -ENOMEM;
}
}
}
cb->data = ctx;
return 0;
}
static int nf_tables_dump_rules_done(struct netlink_callback *cb)
{
struct nft_rule_dump_ctx *ctx = cb->data;
if (ctx) {
kfree(ctx->table);
kfree(ctx->chain);
kfree(ctx);
}
return 0;
}
/* called with rcu_read_lock held */
static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
const struct nft_chain *chain;
const struct nft_rule *rule;
struct net *net = info->net;
struct nft_table *table;
struct sk_buff *skb2;
bool reset = false;
int err;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start= nf_tables_dump_rules_start,
.dump = nf_tables_dump_rules,
.done = nf_tables_dump_rules_done,
.module = THIS_MODULE,
.data = (void *)nla,
};
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]);
return PTR_ERR(table);
}
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return PTR_ERR(rule);
}
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
return -ENOMEM;
if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETRULE_RESET)
reset = true;
err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
family, table, chain, rule, 0, reset);
if (err < 0)
goto err_fill_rule_info;
if (reset)
audit_log_rule_reset(table, nft_pernet(net)->base_seq, 1);
return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
err_fill_rule_info:
kfree_skb(skb2);
return err;
}
void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule)
{
struct nft_expr *expr, *next;
/*
* Careful: some expressions might not be initialized in case this
* is called on error from nf_tables_newrule().
*/
expr = nft_expr_first(rule);
while (nft_expr_more(rule, expr)) {
next = nft_expr_next(expr);
nf_tables_expr_destroy(ctx, expr);
expr = next;
}
kfree(rule);
}
static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
{
nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
nf_tables_rule_destroy(ctx, rule);
}
int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
{
struct nft_expr *expr, *last;
const struct nft_data *data;
struct nft_rule *rule;
int err;
if (ctx->level == NFT_JUMP_STACK_SIZE)
return -EMLINK;
list_for_each_entry(rule, &chain->rules, list) {
if (fatal_signal_pending(current))
return -EINTR;
if (!nft_is_active_next(ctx->net, rule))
continue;
nft_rule_for_each_expr(expr, last, rule) {
if (!expr->ops->validate)
continue;
err = expr->ops->validate(ctx, expr, &data);
if (err < 0)
return err;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(nft_chain_validate);
static int nft_table_validate(struct net *net, const struct nft_table *table)
{
struct nft_chain *chain;
struct nft_ctx ctx = {
.net = net,
.family = table->family,
};
int err;
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_base_chain(chain))
continue;
ctx.chain = chain;
err = nft_chain_validate(&ctx, chain);
if (err < 0)
return err;
cond_resched();
}
return 0;
}
int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
const struct nft_set_iter *iter,
struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
struct nft_ctx *pctx = (struct nft_ctx *)ctx;
const struct nft_data *data;
int err;
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
*nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
return 0;
data = nft_set_ext_data(ext);
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
pctx->level++;
err = nft_chain_validate(ctx, data->verdict.chain);
if (err < 0)
return err;
pctx->level--;
break;
default:
break;
}
return 0;
}
int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
{
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set_elem_catchall *catchall;
struct nft_set_elem elem;
struct nft_set_ext *ext;
int ret = 0;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask))
continue;
elem.priv = catchall->elem;
ret = nft_setelem_validate(ctx, set, NULL, &elem);
if (ret < 0)
return ret;
}
return ret;
}
static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
const struct nft_chain *chain,
const struct nlattr *nla);
#define NFT_RULE_MAXEXPRS 128
static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct nftables_pernet *nft_net = nft_pernet(info->net);
struct netlink_ext_ack *extack = info->extack;
unsigned int size, i, n, ulen = 0, usize = 0;
u8 genmask = nft_genmask_next(info->net);
struct nft_rule *rule, *old_rule = NULL;
struct nft_expr_info *expr_info = NULL;
u8 family = info->nfmsg->nfgen_family;
struct nft_flow_rule *flow = NULL;
struct net *net = info->net;
struct nft_userdata *udata;
struct nft_table *table;
struct nft_chain *chain;
struct nft_trans *trans;
u64 handle, pos_handle;
struct nft_expr *expr;
struct nft_ctx ctx;
struct nlattr *tmp;
int err, rem;
lockdep_assert_held(&nft_net->commit_mutex);
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]);
return PTR_ERR(table);
}
if (nla[NFTA_RULE_CHAIN]) {
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
} else if (nla[NFTA_RULE_CHAIN_ID]) {
chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]);
return PTR_ERR(chain);
}
} else {
return -EINVAL;
}
if (nft_chain_is_bound(chain))
return -EOPNOTSUPP;
if (nla[NFTA_RULE_HANDLE]) {
handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
rule = __nft_rule_lookup(chain, handle);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return PTR_ERR(rule);
}
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return -EEXIST;
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
old_rule = rule;
else
return -EOPNOTSUPP;
} else {
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE) ||
info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EINVAL;
handle = nf_tables_alloc_handle(table);
if (nla[NFTA_RULE_POSITION]) {
pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
old_rule = __nft_rule_lookup(chain, pos_handle);
if (IS_ERR(old_rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
return PTR_ERR(old_rule);
}
} else if (nla[NFTA_RULE_POSITION_ID]) {
old_rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_POSITION_ID]);
if (IS_ERR(old_rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]);
return PTR_ERR(old_rule);
}
}
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
n = 0;
size = 0;
if (nla[NFTA_RULE_EXPRESSIONS]) {
expr_info = kvmalloc_array(NFT_RULE_MAXEXPRS,
sizeof(struct nft_expr_info),
GFP_KERNEL);
if (!expr_info)
return -ENOMEM;
nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
err = -EINVAL;
if (nla_type(tmp) != NFTA_LIST_ELEM)
goto err_release_expr;
if (n == NFT_RULE_MAXEXPRS)
goto err_release_expr;
err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
if (err < 0) {
NL_SET_BAD_ATTR(extack, tmp);
goto err_release_expr;
}
size += expr_info[n].ops->size;
n++;
}
}
	/* Check for overflow of the 12-bit dlen field */
err = -EFBIG;
if (size >= 1 << 12)
goto err_release_expr;
if (nla[NFTA_RULE_USERDATA]) {
ulen = nla_len(nla[NFTA_RULE_USERDATA]);
if (ulen > 0)
usize = sizeof(struct nft_userdata) + ulen;
}
err = -ENOMEM;
rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL_ACCOUNT);
if (rule == NULL)
goto err_release_expr;
nft_activate_next(net, rule);
rule->handle = handle;
rule->dlen = size;
rule->udata = ulen ? 1 : 0;
if (ulen) {
udata = nft_userdata(rule);
udata->len = ulen - 1;
nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen);
}
expr = nft_expr_first(rule);
for (i = 0; i < n; i++) {
err = nf_tables_newexpr(&ctx, &expr_info[i], expr);
if (err < 0) {
NL_SET_BAD_ATTR(extack, expr_info[i].attr);
goto err_release_rule;
}
if (expr_info[i].ops->validate)
nft_validate_state_update(table, NFT_VALIDATE_NEED);
expr_info[i].ops = NULL;
expr = nft_expr_next(expr);
}
if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
flow = nft_flow_rule_create(net, rule);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto err_release_rule;
}
}
if (!nft_use_inc(&chain->use)) {
err = -EMFILE;
goto err_release_rule;
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
if (nft_chain_binding(chain)) {
err = -EOPNOTSUPP;
goto err_destroy_flow_rule;
}
err = nft_delrule(&ctx, old_rule);
if (err < 0)
goto err_destroy_flow_rule;
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (trans == NULL) {
err = -ENOMEM;
goto err_destroy_flow_rule;
}
list_add_tail_rcu(&rule->list, &old_rule->list);
} else {
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (!trans) {
err = -ENOMEM;
goto err_destroy_flow_rule;
}
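		/* With NLM_F_APPEND the new rule goes after the position rule
		 * (or at the chain tail); otherwise it goes before it (or at
		 * the chain head).
		 */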
if (info->nlh->nlmsg_flags & NLM_F_APPEND) {
if (old_rule)
list_add_rcu(&rule->list, &old_rule->list);
else
list_add_tail_rcu(&rule->list, &chain->rules);
} else {
if (old_rule)
list_add_tail_rcu(&rule->list, &old_rule->list);
else
list_add_rcu(&rule->list, &chain->rules);
}
}
kvfree(expr_info);
if (flow)
nft_trans_flow_rule(trans) = flow;
if (table->validate_state == NFT_VALIDATE_DO)
return nft_table_validate(net, table);
return 0;
err_destroy_flow_rule:
nft_use_dec_restore(&chain->use);
if (flow)
nft_flow_rule_destroy(flow);
err_release_rule:
nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE_ERROR);
nf_tables_rule_destroy(&ctx, rule);
err_release_expr:
for (i = 0; i < n; i++) {
if (expr_info[i].ops) {
module_put(expr_info[i].ops->type->owner);
if (expr_info[i].ops->type->release_ops)
expr_info[i].ops->type->release_ops(expr_info[i].ops);
}
}
kvfree(expr_info);
return err;
}
static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
const struct nft_chain *chain,
const struct nlattr *nla)
{
struct nftables_pernet *nft_net = nft_pernet(net);
u32 id = ntohl(nla_get_be32(nla));
struct nft_trans *trans;
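	/* Resolve a rule added earlier in this transaction batch by its
	 * NFTA_RULE_ID.
	 */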
list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->msg_type == NFT_MSG_NEWRULE &&
trans->ctx.chain == chain &&
id == nft_trans_rule_id(trans))
return nft_trans_rule(trans);
}
return ERR_PTR(-ENOENT);
}
static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct nft_chain *chain = NULL;
struct net *net = info->net;
struct nft_table *table;
struct nft_rule *rule;
struct nft_ctx ctx;
int err = 0;
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]);
return PTR_ERR(table);
}
if (nla[NFTA_RULE_CHAIN]) {
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
genmask);
if (IS_ERR(chain)) {
if (PTR_ERR(chain) == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYRULE)
return 0;
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
if (nft_chain_binding(chain))
return -EOPNOTSUPP;
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
if (chain) {
if (nla[NFTA_RULE_HANDLE]) {
rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
if (IS_ERR(rule)) {
if (PTR_ERR(rule) == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYRULE)
return 0;
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return PTR_ERR(rule);
}
err = nft_delrule(&ctx, rule);
} else if (nla[NFTA_RULE_ID]) {
rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_ID]);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_ID]);
return PTR_ERR(rule);
}
err = nft_delrule(&ctx, rule);
} else {
err = nft_delrule_by_chain(&ctx);
}
} else {
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_active_next(net, chain))
continue;
if (nft_chain_binding(chain))
continue;
ctx.chain = chain;
err = nft_delrule_by_chain(&ctx);
if (err < 0)
break;
}
}
return err;
}
/*
* Sets
*/
static const struct nft_set_type *nft_set_types[] = {
&nft_set_hash_fast_type,
&nft_set_hash_type,
&nft_set_rhash_type,
&nft_set_bitmap_type,
&nft_set_rbtree_type,
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
&nft_set_pipapo_avx2_type,
#endif
&nft_set_pipapo_type,
};
#define NFT_SET_FEATURES (NFT_SET_INTERVAL | NFT_SET_MAP | \
NFT_SET_TIMEOUT | NFT_SET_OBJECT | \
NFT_SET_EVAL)
static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
{
return (flags & type->features) == (flags & NFT_SET_FEATURES);
}
/*
* Select a set implementation based on the data characteristics and the
* given policy. The total memory use might not be known if no size is
 * given; in that case, the amount of memory per element is used.
*/
static const struct nft_set_ops *
nft_select_set_ops(const struct nft_ctx *ctx,
const struct nlattr * const nla[],
const struct nft_set_desc *desc)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
const struct nft_set_ops *ops, *bops;
struct nft_set_estimate est, best;
const struct nft_set_type *type;
u32 flags = 0;
int i;
lockdep_assert_held(&nft_net->commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
if (nla[NFTA_SET_FLAGS] != NULL)
flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
bops = NULL;
best.size = ~0;
best.lookup = ~0;
best.space = ~0;
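	/* Walk the available backends and keep the best estimate for the
	 * requested policy: performance prefers lookup cost, memory prefers
	 * per-element space (or total size, when one is given).
	 */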
for (i = 0; i < ARRAY_SIZE(nft_set_types); i++) {
type = nft_set_types[i];
ops = &type->ops;
if (!nft_set_ops_candidate(type, flags))
continue;
if (!ops->estimate(desc, flags, &est))
continue;
switch (desc->policy) {
case NFT_SET_POL_PERFORMANCE:
if (est.lookup < best.lookup)
break;
if (est.lookup == best.lookup &&
est.space < best.space)
break;
continue;
case NFT_SET_POL_MEMORY:
if (!desc->size) {
if (est.space < best.space)
break;
if (est.space == best.space &&
est.lookup < best.lookup)
break;
} else if (est.size < best.size || !bops) {
break;
}
continue;
default:
break;
}
bops = ops;
best = est;
}
if (bops != NULL)
return bops;
return ERR_PTR(-EOPNOTSUPP);
}
static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
[NFTA_SET_TABLE] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_SET_NAME] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_SET_FLAGS] = { .type = NLA_U32 },
[NFTA_SET_KEY_TYPE] = { .type = NLA_U32 },
[NFTA_SET_KEY_LEN] = { .type = NLA_U32 },
[NFTA_SET_DATA_TYPE] = { .type = NLA_U32 },
[NFTA_SET_DATA_LEN] = { .type = NLA_U32 },
[NFTA_SET_POLICY] = { .type = NLA_U32 },
[NFTA_SET_DESC] = { .type = NLA_NESTED },
[NFTA_SET_ID] = { .type = NLA_U32 },
[NFTA_SET_TIMEOUT] = { .type = NLA_U64 },
[NFTA_SET_GC_INTERVAL] = { .type = NLA_U32 },
[NFTA_SET_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
[NFTA_SET_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_SET_HANDLE] = { .type = NLA_U64 },
[NFTA_SET_EXPR] = { .type = NLA_NESTED },
[NFTA_SET_EXPRESSIONS] = { .type = NLA_NESTED },
};
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
[NFTA_SET_DESC_SIZE] = { .type = NLA_U32 },
[NFTA_SET_DESC_CONCAT] = { .type = NLA_NESTED },
};
static struct nft_set *nft_set_lookup(const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
struct nft_set *set;
if (nla == NULL)
return ERR_PTR(-EINVAL);
list_for_each_entry_rcu(set, &table->sets, list) {
if (!nla_strcmp(nla, set->name) &&
nft_active_genmask(set, genmask))
return set;
}
return ERR_PTR(-ENOENT);
}
static struct nft_set *nft_set_lookup_byhandle(const struct nft_table *table,
const struct nlattr *nla,
u8 genmask)
{
struct nft_set *set;
list_for_each_entry(set, &table->sets, list) {
if (be64_to_cpu(nla_get_be64(nla)) == set->handle &&
nft_active_genmask(set, genmask))
return set;
}
return ERR_PTR(-ENOENT);
}
static struct nft_set *nft_set_lookup_byid(const struct net *net,
const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
struct nftables_pernet *nft_net = nft_pernet(net);
u32 id = ntohl(nla_get_be32(nla));
struct nft_trans *trans;
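	/* Resolve a set created earlier in this transaction batch by its
	 * NFTA_SET_ID.
	 */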
list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->msg_type == NFT_MSG_NEWSET) {
struct nft_set *set = nft_trans_set(trans);
if (id == nft_trans_set_id(trans) &&
set->table == table &&
nft_active_genmask(set, genmask))
return set;
}
}
return ERR_PTR(-ENOENT);
}
struct nft_set *nft_set_lookup_global(const struct net *net,
const struct nft_table *table,
const struct nlattr *nla_set_name,
const struct nlattr *nla_set_id,
u8 genmask)
{
struct nft_set *set;
set = nft_set_lookup(table, nla_set_name, genmask);
if (IS_ERR(set)) {
if (!nla_set_id)
return set;
set = nft_set_lookup_byid(net, table, nla_set_id, genmask);
}
return set;
}
EXPORT_SYMBOL_GPL(nft_set_lookup_global);
static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
const char *name)
{
const struct nft_set *i;
const char *p;
unsigned long *inuse;
unsigned int n = 0, min = 0;
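	/* A name may contain a single "%d" template; scan the existing set
	 * names in PAGE_SIZE * BITS_PER_BYTE sized windows for the lowest
	 * unused suffix.
	 */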
p = strchr(name, '%');
if (p != NULL) {
if (p[1] != 'd' || strchr(p + 2, '%'))
return -EINVAL;
inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (inuse == NULL)
return -ENOMEM;
cont:
list_for_each_entry(i, &ctx->table->sets, list) {
int tmp;
if (!nft_is_active_next(ctx->net, i))
continue;
if (!sscanf(i->name, name, &tmp))
continue;
if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
continue;
set_bit(tmp - min, inuse);
}
n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
if (n >= BITS_PER_BYTE * PAGE_SIZE) {
min += BITS_PER_BYTE * PAGE_SIZE;
memset(inuse, 0, PAGE_SIZE);
goto cont;
}
free_page((unsigned long)inuse);
}
set->name = kasprintf(GFP_KERNEL_ACCOUNT, name, min + n);
if (!set->name)
return -ENOMEM;
list_for_each_entry(i, &ctx->table->sets, list) {
if (!nft_is_active_next(ctx->net, i))
continue;
if (!strcmp(set->name, i->name)) {
kfree(set->name);
set->name = NULL;
return -ENFILE;
}
}
return 0;
}
int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
{
u64 ms = be64_to_cpu(nla_get_be64(nla));
u64 max = (u64)(~((u64)0));
max = div_u64(max, NSEC_PER_MSEC);
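	/* Reject timeouts that would overflow when converted from
	 * milliseconds to nanoseconds.
	 */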
if (ms >= max)
return -ERANGE;
ms *= NSEC_PER_MSEC;
*result = nsecs_to_jiffies64(ms);
return 0;
}
__be64 nf_jiffies64_to_msecs(u64 input)
{
return cpu_to_be64(jiffies64_to_msecs(input));
}
static int nf_tables_fill_set_concat(struct sk_buff *skb,
const struct nft_set *set)
{
struct nlattr *concat, *field;
int i;
concat = nla_nest_start_noflag(skb, NFTA_SET_DESC_CONCAT);
if (!concat)
return -ENOMEM;
for (i = 0; i < set->field_count; i++) {
field = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
if (!field)
return -ENOMEM;
if (nla_put_be32(skb, NFTA_SET_FIELD_LEN,
htonl(set->field_len[i])))
return -ENOMEM;
nla_nest_end(skb, field);
}
nla_nest_end(skb, concat);
return 0;
}
static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
const struct nft_set *set, u16 event, u16 flags)
{
u64 timeout = READ_ONCE(set->timeout);
u32 gc_int = READ_ONCE(set->gc_int);
u32 portid = ctx->portid;
struct nlmsghdr *nlh;
struct nlattr *nest;
u32 seq = ctx->seq;
int i;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
NFNETLINK_V0, nft_base_seq(ctx->net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_NAME, set->name))
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_SET_HANDLE, cpu_to_be64(set->handle),
NFTA_SET_PAD))
goto nla_put_failure;
if (event == NFT_MSG_DELSET) {
nlmsg_end(skb, nlh);
return 0;
}
if (set->flags != 0)
if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_SET_KEY_TYPE, htonl(set->ktype)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_SET_KEY_LEN, htonl(set->klen)))
goto nla_put_failure;
if (set->flags & NFT_SET_MAP) {
if (nla_put_be32(skb, NFTA_SET_DATA_TYPE, htonl(set->dtype)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_SET_DATA_LEN, htonl(set->dlen)))
goto nla_put_failure;
}
if (set->flags & NFT_SET_OBJECT &&
nla_put_be32(skb, NFTA_SET_OBJ_TYPE, htonl(set->objtype)))
goto nla_put_failure;
if (timeout &&
nla_put_be64(skb, NFTA_SET_TIMEOUT,
nf_jiffies64_to_msecs(timeout),
NFTA_SET_PAD))
goto nla_put_failure;
if (gc_int &&
nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(gc_int)))
goto nla_put_failure;
if (set->policy != NFT_SET_POL_PERFORMANCE) {
if (nla_put_be32(skb, NFTA_SET_POLICY, htonl(set->policy)))
goto nla_put_failure;
}
if (set->udata &&
nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
if (!nest)
goto nla_put_failure;
if (set->size &&
nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
goto nla_put_failure;
if (set->field_count > 1 &&
nf_tables_fill_set_concat(skb, set))
goto nla_put_failure;
nla_nest_end(skb, nest);
if (set->num_exprs == 1) {
		nest = nla_nest_start_noflag(skb, NFTA_SET_EXPR);
		if (!nest)
			goto nla_put_failure;
		if (nf_tables_fill_expr_info(skb, set->exprs[0], false) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
} else if (set->num_exprs > 1) {
nest = nla_nest_start_noflag(skb, NFTA_SET_EXPRESSIONS);
if (nest == NULL)
goto nla_put_failure;
for (i = 0; i < set->num_exprs; i++) {
if (nft_expr_dump(skb, NFTA_LIST_ELEM,
set->exprs[i], false) < 0)
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
return -1;
}
static void nf_tables_set_notify(const struct nft_ctx *ctx,
const struct nft_set *set, int event,
gfp_t gfp_flags)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
u32 portid = ctx->portid;
struct sk_buff *skb;
u16 flags = 0;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return;
skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags);
if (skb == NULL)
goto err;
if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
err = nf_tables_fill_set(skb, ctx, set, event, flags);
if (err < 0) {
kfree_skb(skb);
goto err;
}
nft_notify_enqueue(skb, ctx->report, &nft_net->notify_list);
return;
err:
nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nft_set *set;
unsigned int idx, s_idx = cb->args[0];
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
struct net *net = sock_net(skb->sk);
struct nft_ctx *ctx = cb->data, ctx_set;
struct nftables_pernet *nft_net;
if (cb->args[1])
return skb->len;
rcu_read_lock();
nft_net = nft_pernet(net);
cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (ctx->family != NFPROTO_UNSPEC &&
ctx->family != table->family)
continue;
if (ctx->table && ctx->table != table)
continue;
if (cur_table) {
if (cur_table != table)
continue;
cur_table = NULL;
}
idx = 0;
list_for_each_entry_rcu(set, &table->sets, list) {
if (idx < s_idx)
goto cont;
if (!nft_is_active(net, set))
goto cont;
ctx_set = *ctx;
ctx_set.table = table;
ctx_set.family = table->family;
if (nf_tables_fill_set(skb, &ctx_set, set,
NFT_MSG_NEWSET,
NLM_F_MULTI) < 0) {
cb->args[0] = idx;
cb->args[2] = (unsigned long) table;
goto done;
}
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
if (s_idx)
s_idx = 0;
}
cb->args[1] = 1;
done:
rcu_read_unlock();
return skb->len;
}
static int nf_tables_dump_sets_start(struct netlink_callback *cb)
{
struct nft_ctx *ctx_dump = NULL;
ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC);
if (ctx_dump == NULL)
return -ENOMEM;
cb->data = ctx_dump;
return 0;
}
static int nf_tables_dump_sets_done(struct netlink_callback *cb)
{
kfree(cb->data);
return 0;
}
/* called with rcu_read_lock held */
static int nf_tables_getset(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
struct nft_table *table = NULL;
struct net *net = info->net;
const struct nft_set *set;
struct sk_buff *skb2;
struct nft_ctx ctx;
int err;
if (nla[NFTA_SET_TABLE]) {
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
return PTR_ERR(table);
}
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nf_tables_dump_sets_start,
.dump = nf_tables_dump_sets,
.done = nf_tables_dump_sets_done,
.data = &ctx,
.module = THIS_MODULE,
};
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
/* Only accept unspec with dump */
if (info->nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
if (!nla[NFTA_SET_TABLE])
return -EINVAL;
set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb2 == NULL)
return -ENOMEM;
err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
if (err < 0)
goto err_fill_set_info;
return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
err_fill_set_info:
kfree_skb(skb2);
return err;
}
static const struct nla_policy nft_concat_policy[NFTA_SET_FIELD_MAX + 1] = {
[NFTA_SET_FIELD_LEN] = { .type = NLA_U32 },
};
static int nft_set_desc_concat_parse(const struct nlattr *attr,
struct nft_set_desc *desc)
{
struct nlattr *tb[NFTA_SET_FIELD_MAX + 1];
u32 len;
int err;
if (desc->field_count >= ARRAY_SIZE(desc->field_len))
return -E2BIG;
err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
nft_concat_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_SET_FIELD_LEN])
return -EINVAL;
len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
if (!len || len > U8_MAX)
return -EINVAL;
desc->field_len[desc->field_count++] = len;
return 0;
}
static int nft_set_desc_concat(struct nft_set_desc *desc,
const struct nlattr *nla)
{
struct nlattr *attr;
u32 num_regs = 0;
int rem, err, i;
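	/* Collect the length of each concatenated key field and verify that
	 * the combined key still fits into the available 32-bit registers.
	 */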
nla_for_each_nested(attr, nla, rem) {
if (nla_type(attr) != NFTA_LIST_ELEM)
return -EINVAL;
err = nft_set_desc_concat_parse(attr, desc);
if (err < 0)
return err;
}
for (i = 0; i < desc->field_count; i++)
num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
if (num_regs > NFT_REG32_COUNT)
return -E2BIG;
return 0;
}
static int nf_tables_set_desc_parse(struct nft_set_desc *desc,
const struct nlattr *nla)
{
struct nlattr *da[NFTA_SET_DESC_MAX + 1];
int err;
err = nla_parse_nested_deprecated(da, NFTA_SET_DESC_MAX, nla,
nft_set_desc_policy, NULL);
if (err < 0)
return err;
if (da[NFTA_SET_DESC_SIZE] != NULL)
desc->size = ntohl(nla_get_be32(da[NFTA_SET_DESC_SIZE]));
if (da[NFTA_SET_DESC_CONCAT])
err = nft_set_desc_concat(desc, da[NFTA_SET_DESC_CONCAT]);
return err;
}
static int nft_set_expr_alloc(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr * const *nla,
struct nft_expr **exprs, int *num_exprs,
u32 flags)
{
struct nft_expr *expr;
int err, i;
if (nla[NFTA_SET_EXPR]) {
expr = nft_set_elem_expr_alloc(ctx, set, nla[NFTA_SET_EXPR]);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
goto err_set_expr_alloc;
}
exprs[0] = expr;
(*num_exprs)++;
} else if (nla[NFTA_SET_EXPRESSIONS]) {
struct nlattr *tmp;
int left;
if (!(flags & NFT_SET_EXPR)) {
err = -EINVAL;
goto err_set_expr_alloc;
}
i = 0;
nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
if (i == NFT_SET_EXPR_MAX) {
err = -E2BIG;
goto err_set_expr_alloc;
}
if (nla_type(tmp) != NFTA_LIST_ELEM) {
err = -EINVAL;
goto err_set_expr_alloc;
}
expr = nft_set_elem_expr_alloc(ctx, set, tmp);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
goto err_set_expr_alloc;
}
exprs[i++] = expr;
(*num_exprs)++;
}
}
return 0;
err_set_expr_alloc:
for (i = 0; i < *num_exprs; i++)
nft_expr_destroy(ctx, exprs[i]);
return err;
}
static bool nft_set_is_same(const struct nft_set *set,
const struct nft_set_desc *desc,
struct nft_expr *exprs[], u32 num_exprs, u32 flags)
{
int i;
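	/* An existing set may only be re-declared if all of its properties
	 * and element expressions match.
	 */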
if (set->ktype != desc->ktype ||
set->dtype != desc->dtype ||
set->flags != flags ||
set->klen != desc->klen ||
set->dlen != desc->dlen ||
set->field_count != desc->field_count ||
set->num_exprs != num_exprs)
return false;
for (i = 0; i < desc->field_count; i++) {
if (set->field_len[i] != desc->field_len[i])
return false;
}
for (i = 0; i < num_exprs; i++) {
if (set->exprs[i]->ops != exprs[i]->ops)
return false;
}
return true;
}
static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
const struct nft_set_ops *ops;
struct net *net = info->net;
struct nft_set_desc desc;
struct nft_table *table;
unsigned char *udata;
struct nft_set *set;
struct nft_ctx ctx;
size_t alloc_size;
int num_exprs = 0;
char *name;
int err, i;
u16 udlen;
u32 flags;
u64 size;
if (nla[NFTA_SET_TABLE] == NULL ||
nla[NFTA_SET_NAME] == NULL ||
nla[NFTA_SET_KEY_LEN] == NULL ||
nla[NFTA_SET_ID] == NULL)
return -EINVAL;
memset(&desc, 0, sizeof(desc));
desc.ktype = NFT_DATA_VALUE;
if (nla[NFTA_SET_KEY_TYPE] != NULL) {
desc.ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
if ((desc.ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
return -EINVAL;
}
desc.klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
if (desc.klen == 0 || desc.klen > NFT_DATA_VALUE_MAXLEN)
return -EINVAL;
flags = 0;
if (nla[NFTA_SET_FLAGS] != NULL) {
flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
NFT_SET_MAP | NFT_SET_EVAL |
NFT_SET_OBJECT | NFT_SET_CONCAT | NFT_SET_EXPR))
return -EOPNOTSUPP;
/* Only one of these operations is supported */
if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
(NFT_SET_MAP | NFT_SET_OBJECT))
return -EOPNOTSUPP;
if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
(NFT_SET_EVAL | NFT_SET_OBJECT))
return -EOPNOTSUPP;
}
desc.dtype = 0;
if (nla[NFTA_SET_DATA_TYPE] != NULL) {
if (!(flags & NFT_SET_MAP))
return -EINVAL;
desc.dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
if ((desc.dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
desc.dtype != NFT_DATA_VERDICT)
return -EINVAL;
if (desc.dtype != NFT_DATA_VERDICT) {
if (nla[NFTA_SET_DATA_LEN] == NULL)
return -EINVAL;
desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
if (desc.dlen == 0 || desc.dlen > NFT_DATA_VALUE_MAXLEN)
return -EINVAL;
} else
desc.dlen = sizeof(struct nft_verdict);
} else if (flags & NFT_SET_MAP)
return -EINVAL;
if (nla[NFTA_SET_OBJ_TYPE] != NULL) {
if (!(flags & NFT_SET_OBJECT))
return -EINVAL;
desc.objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
if (desc.objtype == NFT_OBJECT_UNSPEC ||
desc.objtype > NFT_OBJECT_MAX)
return -EOPNOTSUPP;
} else if (flags & NFT_SET_OBJECT)
return -EINVAL;
else
desc.objtype = NFT_OBJECT_UNSPEC;
desc.timeout = 0;
if (nla[NFTA_SET_TIMEOUT] != NULL) {
if (!(flags & NFT_SET_TIMEOUT))
return -EINVAL;
if (flags & NFT_SET_ANONYMOUS)
return -EOPNOTSUPP;
err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &desc.timeout);
if (err)
return err;
}
desc.gc_int = 0;
if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
if (!(flags & NFT_SET_TIMEOUT))
return -EINVAL;
if (flags & NFT_SET_ANONYMOUS)
return -EOPNOTSUPP;
desc.gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
}
desc.policy = NFT_SET_POL_PERFORMANCE;
if (nla[NFTA_SET_POLICY] != NULL)
desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
if (nla[NFTA_SET_DESC] != NULL) {
err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
if (err < 0)
return err;
if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
return -EINVAL;
} else if (flags & NFT_SET_CONCAT) {
return -EINVAL;
}
if (nla[NFTA_SET_EXPR] || nla[NFTA_SET_EXPRESSIONS])
desc.expr = true;
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
return PTR_ERR(table);
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
if (IS_ERR(set)) {
if (PTR_ERR(set) != -ENOENT) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
return PTR_ERR(set);
}
} else {
struct nft_expr *exprs[NFT_SET_EXPR_MAX] = {};
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
return -EEXIST;
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
if (nft_set_is_anonymous(set))
return -EOPNOTSUPP;
err = nft_set_expr_alloc(&ctx, set, nla, exprs, &num_exprs, flags);
if (err < 0)
return err;
err = 0;
if (!nft_set_is_same(set, &desc, exprs, num_exprs, flags)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
err = -EEXIST;
}
for (i = 0; i < num_exprs; i++)
nft_expr_destroy(&ctx, exprs[i]);
if (err < 0)
return err;
return __nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set, &desc);
}
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
ops = nft_select_set_ops(&ctx, nla, &desc);
if (IS_ERR(ops))
return PTR_ERR(ops);
udlen = 0;
if (nla[NFTA_SET_USERDATA])
udlen = nla_len(nla[NFTA_SET_USERDATA]);
size = 0;
if (ops->privsize != NULL)
size = ops->privsize(nla, &desc);
alloc_size = sizeof(*set) + size + udlen;
if (alloc_size < size || alloc_size > INT_MAX)
return -ENOMEM;
if (!nft_use_inc(&table->use))
return -EMFILE;
set = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT);
if (!set) {
err = -ENOMEM;
goto err_alloc;
}
name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL_ACCOUNT);
if (!name) {
err = -ENOMEM;
goto err_set_name;
}
err = nf_tables_set_alloc_name(&ctx, set, name);
kfree(name);
if (err < 0)
goto err_set_name;
udata = NULL;
if (udlen) {
udata = set->data + size;
nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
}
INIT_LIST_HEAD(&set->bindings);
INIT_LIST_HEAD(&set->catchall_list);
refcount_set(&set->refs, 1);
set->table = table;
write_pnet(&set->net, net);
set->ops = ops;
set->ktype = desc.ktype;
set->klen = desc.klen;
set->dtype = desc.dtype;
set->objtype = desc.objtype;
set->dlen = desc.dlen;
set->flags = flags;
set->size = desc.size;
set->policy = desc.policy;
set->udlen = udlen;
set->udata = udata;
set->timeout = desc.timeout;
set->gc_int = desc.gc_int;
set->field_count = desc.field_count;
for (i = 0; i < desc.field_count; i++)
set->field_len[i] = desc.field_len[i];
err = ops->init(set, &desc, nla);
if (err < 0)
goto err_set_init;
err = nft_set_expr_alloc(&ctx, set, nla, set->exprs, &num_exprs, flags);
if (err < 0)
goto err_set_destroy;
set->num_exprs = num_exprs;
set->handle = nf_tables_alloc_handle(table);
INIT_LIST_HEAD(&set->pending_update);
err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
if (err < 0)
goto err_set_expr_alloc;
list_add_tail_rcu(&set->list, &table->sets);
return 0;
err_set_expr_alloc:
for (i = 0; i < set->num_exprs; i++)
nft_expr_destroy(&ctx, set->exprs[i]);
err_set_destroy:
ops->destroy(&ctx, set);
err_set_init:
kfree(set->name);
err_set_name:
kvfree(set);
err_alloc:
nft_use_dec_restore(&table->use);
return err;
}
static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
struct nft_set *set)
{
struct nft_set_elem_catchall *next, *catchall;
list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
list_del_rcu(&catchall->list);
nf_tables_set_elem_destroy(ctx, set, catchall->elem);
kfree_rcu(catchall, rcu);
}
}
static void nft_set_put(struct nft_set *set)
{
if (refcount_dec_and_test(&set->refs)) {
kfree(set->name);
kvfree(set);
}
}
static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
{
int i;
if (WARN_ON(set->use > 0))
return;
for (i = 0; i < set->num_exprs; i++)
nft_expr_destroy(ctx, set->exprs[i]);
set->ops->destroy(ctx, set);
nft_set_catchall_destroy(ctx, set);
nft_set_put(set);
}
static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
if (info->nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
genmask, NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
return PTR_ERR(table);
}
if (nla[NFTA_SET_HANDLE]) {
attr = nla[NFTA_SET_HANDLE];
set = nft_set_lookup_byhandle(table, attr, genmask);
} else {
attr = nla[NFTA_SET_NAME];
set = nft_set_lookup(table, attr, genmask);
}
if (IS_ERR(set)) {
if (PTR_ERR(set) == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYSET)
return 0;
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(set);
}
if (set->use ||
(info->nlh->nlmsg_flags & NLM_F_NONREC &&
atomic_read(&set->nelems) > 0)) {
NL_SET_BAD_ATTR(extack, attr);
return -EBUSY;
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
return nft_delset(&ctx, set);
}
static int nft_validate_register_store(const struct nft_ctx *ctx,
enum nft_registers reg,
const struct nft_data *data,
enum nft_data_types type,
unsigned int len);
static int nft_setelem_data_validate(const struct nft_ctx *ctx,
struct nft_set *set,
struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
enum nft_registers dreg;
dreg = nft_type_to_reg(set->dtype);
return nft_validate_register_store(ctx, dreg, nft_set_ext_data(ext),
set->dtype == NFT_DATA_VERDICT ?
NFT_DATA_VERDICT : NFT_DATA_VALUE,
set->dlen);
}
static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
struct nft_set_elem *elem)
{
return nft_setelem_data_validate(ctx, set, elem);
}
static int nft_set_catchall_bind_check(const struct nft_ctx *ctx,
struct nft_set *set)
{
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set_elem_catchall *catchall;
struct nft_set_elem elem;
struct nft_set_ext *ext;
int ret = 0;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask))
continue;
elem.priv = catchall->elem;
ret = nft_setelem_data_validate(ctx, set, &elem);
if (ret < 0)
break;
}
return ret;
}
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding)
{
struct nft_set_binding *i;
struct nft_set_iter iter;
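	/* Anonymous sets are bound exactly once. */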
if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
return -EBUSY;
if (binding->flags & NFT_SET_MAP) {
/* If the set is already bound to the same chain all
* jumps are already validated for that chain.
*/
list_for_each_entry(i, &set->bindings, list) {
if (i->flags & NFT_SET_MAP &&
i->chain == binding->chain)
goto bind;
}
iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_bind_check_setelem;
set->ops->walk(ctx, set, &iter);
if (!iter.err)
iter.err = nft_set_catchall_bind_check(ctx, set);
if (iter.err < 0)
return iter.err;
}
bind:
if (!nft_use_inc(&set->use))
return -EMFILE;
binding->chain = ctx->chain;
list_add_tail_rcu(&binding->list, &set->bindings);
nft_set_trans_bind(ctx, set);
return 0;
}
EXPORT_SYMBOL_GPL(nf_tables_bind_set);
static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding, bool event)
{
list_del_rcu(&binding->list);
if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
list_del_rcu(&set->list);
if (event)
nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
GFP_KERNEL);
}
}
static void nft_setelem_data_activate(const struct net *net,
const struct nft_set *set,
struct nft_set_elem *elem);
static int nft_mapelem_activate(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
struct nft_set_elem *elem)
{
nft_setelem_data_activate(ctx->net, set, elem);
return 0;
}
static void nft_map_catchall_activate(const struct nft_ctx *ctx,
struct nft_set *set)
{
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set_elem_catchall *catchall;
struct nft_set_elem elem;
struct nft_set_ext *ext;
list_for_each_entry(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask))
continue;
elem.priv = catchall->elem;
nft_setelem_data_activate(ctx->net, set, &elem);
break;
}
}
static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
{
struct nft_set_iter iter = {
.genmask = nft_genmask_next(ctx->net),
.fn = nft_mapelem_activate,
};
set->ops->walk(ctx, set, &iter);
WARN_ON_ONCE(iter.err);
nft_map_catchall_activate(ctx, set);
}
void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
{
if (nft_set_is_anonymous(set)) {
if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
nft_map_activate(ctx, set);
nft_clear(ctx->net, set);
}
nft_use_inc_restore(&set->use);
}
EXPORT_SYMBOL_GPL(nf_tables_activate_set);
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase)
{
switch (phase) {
case NFT_TRANS_PREPARE_ERROR:
nft_set_trans_unbind(ctx, set);
if (nft_set_is_anonymous(set))
nft_deactivate_next(ctx->net, set);
else
list_del_rcu(&binding->list);
nft_use_dec(&set->use);
break;
case NFT_TRANS_PREPARE:
if (nft_set_is_anonymous(set)) {
if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
nft_map_deactivate(ctx, set);
nft_deactivate_next(ctx->net, set);
}
nft_use_dec(&set->use);
return;
case NFT_TRANS_ABORT:
case NFT_TRANS_RELEASE:
if (nft_set_is_anonymous(set) &&
set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
nft_map_deactivate(ctx, set);
nft_use_dec(&set->use);
fallthrough;
default:
nf_tables_unbind_set(ctx, set, binding,
phase == NFT_TRANS_COMMIT);
}
}
EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
{
if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
nft_set_destroy(ctx, set);
}
EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
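/* Length and alignment of every element extension type. Extensions without a
 * fixed .len (key, end key, data, expressions) take their length from the
 * extension template when the element is built.
 */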
const struct nft_set_ext_type nft_set_ext_types[] = {
[NFT_SET_EXT_KEY] = {
.align = __alignof__(u32),
},
[NFT_SET_EXT_DATA] = {
.align = __alignof__(u32),
},
[NFT_SET_EXT_EXPRESSIONS] = {
.align = __alignof__(struct nft_set_elem_expr),
},
[NFT_SET_EXT_OBJREF] = {
.len = sizeof(struct nft_object *),
.align = __alignof__(struct nft_object *),
},
[NFT_SET_EXT_FLAGS] = {
.len = sizeof(u8),
.align = __alignof__(u8),
},
[NFT_SET_EXT_TIMEOUT] = {
.len = sizeof(u64),
.align = __alignof__(u64),
},
[NFT_SET_EXT_EXPIRATION] = {
.len = sizeof(u64),
.align = __alignof__(u64),
},
[NFT_SET_EXT_USERDATA] = {
.len = sizeof(struct nft_userdata),
.align = __alignof__(struct nft_userdata),
},
[NFT_SET_EXT_KEY_END] = {
.align = __alignof__(u32),
},
};
/*
* Set elements
*/
static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
[NFTA_SET_ELEM_KEY] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_DATA] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_FLAGS] = { .type = NLA_U32 },
[NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 },
[NFTA_SET_ELEM_EXPIRATION] = { .type = NLA_U64 },
[NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
[NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING,
.len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_SET_ELEM_KEY_END] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_EXPRESSIONS] = { .type = NLA_NESTED },
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
[NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
};
static int nft_set_elem_expr_dump(struct sk_buff *skb,
const struct nft_set *set,
const struct nft_set_ext *ext,
bool reset)
{
struct nft_set_elem_expr *elem_expr;
u32 size, num_exprs = 0;
struct nft_expr *expr;
struct nlattr *nest;
elem_expr = nft_set_ext_expr(ext);
nft_setelem_expr_foreach(expr, elem_expr, size)
num_exprs++;
if (num_exprs == 1) {
expr = nft_setelem_expr_at(elem_expr, 0);
if (nft_expr_dump(skb, NFTA_SET_ELEM_EXPR, expr, reset) < 0)
return -1;
return 0;
} else if (num_exprs > 1) {
nest = nla_nest_start_noflag(skb, NFTA_SET_ELEM_EXPRESSIONS);
if (nest == NULL)
goto nla_put_failure;
nft_setelem_expr_foreach(expr, elem_expr, size) {
expr = nft_setelem_expr_at(elem_expr, size);
if (nft_expr_dump(skb, NFTA_LIST_ELEM, expr, reset) < 0)
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
return 0;
nla_put_failure:
return -1;
}
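/* Dump one element as a nested NFTA_LIST_ELEM attribute: key, optional end
 * key, data, expressions, object reference, flags, timeout, expiration and
 * userdata. On reset dumps the expiration is re-armed from the timeout.
 */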
static int nf_tables_fill_setelem(struct sk_buff *skb,
const struct nft_set *set,
const struct nft_set_elem *elem,
bool reset)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
u64 timeout = 0;
nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
if (nest == NULL)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY) &&
nft_data_dump(skb, NFTA_SET_ELEM_KEY, nft_set_ext_key(ext),
NFT_DATA_VALUE, set->klen) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END) &&
nft_data_dump(skb, NFTA_SET_ELEM_KEY_END, nft_set_ext_key_end(ext),
NFT_DATA_VALUE, set->klen) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
set->dlen) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) &&
nft_set_elem_expr_dump(skb, set, ext, reset))
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) &&
nla_put_string(skb, NFTA_SET_ELEM_OBJREF,
(*nft_set_ext_obj(ext))->key.name) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
nla_put_be32(skb, NFTA_SET_ELEM_FLAGS,
htonl(*nft_set_ext_flags(ext))))
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) {
timeout = *nft_set_ext_timeout(ext);
if (nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
nf_jiffies64_to_msecs(timeout),
NFTA_SET_ELEM_PAD))
goto nla_put_failure;
} else if (set->flags & NFT_SET_TIMEOUT) {
timeout = READ_ONCE(set->timeout);
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
u64 expires, now = get_jiffies_64();
expires = *nft_set_ext_expiration(ext);
if (time_before64(now, expires))
expires -= now;
else
expires = 0;
if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
nf_jiffies64_to_msecs(expires),
NFTA_SET_ELEM_PAD))
goto nla_put_failure;
if (reset)
*nft_set_ext_expiration(ext) = now + timeout;
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
struct nft_userdata *udata;
udata = nft_set_ext_userdata(ext);
if (nla_put(skb, NFTA_SET_ELEM_USERDATA,
udata->len + 1, udata->data))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
return 0;
nla_put_failure:
nlmsg_trim(skb, b);
return -EMSGSIZE;
}
struct nft_set_dump_args {
const struct netlink_callback *cb;
struct nft_set_iter iter;
struct sk_buff *skb;
bool reset;
};
static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
struct nft_set_dump_args *args;
if (nft_set_elem_expired(ext))
return 0;
args = container_of(iter, struct nft_set_dump_args, iter);
return nf_tables_fill_setelem(args->skb, set, elem, args->reset);
}
static void audit_log_nft_set_reset(const struct nft_table *table,
unsigned int base_seq,
unsigned int nentries)
{
char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
audit_log_nfcfg(buf, table->family, nentries,
AUDIT_NFT_OP_SETELEM_RESET, GFP_ATOMIC);
kfree(buf);
}
struct nft_set_dump_ctx {
const struct nft_set *set;
struct nft_ctx ctx;
};
static int nft_set_catchall_dump(struct net *net, struct sk_buff *skb,
const struct nft_set *set, bool reset,
unsigned int base_seq)
{
struct nft_set_elem_catchall *catchall;
u8 genmask = nft_genmask_cur(net);
struct nft_set_elem elem;
struct nft_set_ext *ext;
int ret = 0;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask) ||
nft_set_elem_expired(ext))
continue;
elem.priv = catchall->elem;
ret = nf_tables_fill_setelem(skb, set, &elem, reset);
if (reset && !ret)
audit_log_nft_set_reset(set->table, base_seq, 1);
break;
}
return ret;
}
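/* Netlink dump callback: walk the set under RCU, resuming at cb->args[0],
 * and emit one NFT_MSG_NEWSETELEM batch per pass. Catchall elements are
 * dumped once the regular walk reports no further elements.
 */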
static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nft_set_dump_ctx *dump_ctx = cb->data;
struct net *net = sock_net(skb->sk);
struct nftables_pernet *nft_net;
struct nft_table *table;
struct nft_set *set;
struct nft_set_dump_args args;
bool set_found = false;
struct nlmsghdr *nlh;
struct nlattr *nest;
bool reset = false;
u32 portid, seq;
int event;
rcu_read_lock();
nft_net = nft_pernet(net);
cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
dump_ctx->ctx.family != table->family)
continue;
if (table != dump_ctx->ctx.table)
continue;
list_for_each_entry_rcu(set, &table->sets, list) {
if (set == dump_ctx->set) {
set_found = true;
break;
}
}
break;
}
if (!set_found) {
rcu_read_unlock();
return -ENOENT;
}
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM);
portid = NETLINK_CB(cb->skb).portid;
seq = cb->nlh->nlmsg_seq;
nlh = nfnl_msg_put(skb, portid, seq, event, NLM_F_MULTI,
table->family, NFNETLINK_V0, nft_base_seq(net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
if (nest == NULL)
goto nla_put_failure;
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETSETELEM_RESET)
reset = true;
args.cb = cb;
args.skb = skb;
args.reset = reset;
args.iter.genmask = nft_genmask_cur(net);
args.iter.skip = cb->args[0];
args.iter.count = 0;
args.iter.err = 0;
args.iter.fn = nf_tables_dump_setelem;
set->ops->walk(&dump_ctx->ctx, set, &args.iter);
if (!args.iter.err && args.iter.count == cb->args[0])
args.iter.err = nft_set_catchall_dump(net, skb, set,
reset, cb->seq);
nla_nest_end(skb, nest);
nlmsg_end(skb, nlh);
if (reset && args.iter.count > args.iter.skip)
audit_log_nft_set_reset(table, cb->seq,
args.iter.count - args.iter.skip);
rcu_read_unlock();
if (args.iter.err && args.iter.err != -EMSGSIZE)
return args.iter.err;
if (args.iter.count == cb->args[0])
return 0;
cb->args[0] = args.iter.count;
return skb->len;
nla_put_failure:
rcu_read_unlock();
return -ENOSPC;
}
static int nf_tables_dump_set_start(struct netlink_callback *cb)
{
struct nft_set_dump_ctx *dump_ctx = cb->data;
cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC);
return cb->data ? 0 : -ENOMEM;
}
static int nf_tables_dump_set_done(struct netlink_callback *cb)
{
kfree(cb->data);
return 0;
}
static int nf_tables_fill_setelem_info(struct sk_buff *skb,
const struct nft_ctx *ctx, u32 seq,
u32 portid, int event, u16 flags,
const struct nft_set *set,
const struct nft_set_elem *elem,
bool reset)
{
struct nlmsghdr *nlh;
struct nlattr *nest;
int err;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
NFNETLINK_V0, nft_base_seq(ctx->net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_NAME, set->name))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
if (nest == NULL)
goto nla_put_failure;
err = nf_tables_fill_setelem(skb, set, elem, reset);
if (err < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
return -1;
}
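/* Validate element flags: only NFT_SET_ELEM_INTERVAL_END and
 * NFT_SET_ELEM_CATCHALL are accepted, INTERVAL_END requires an interval set
 * and the two flags are mutually exclusive.
 */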
static int nft_setelem_parse_flags(const struct nft_set *set,
const struct nlattr *attr, u32 *flags)
{
if (attr == NULL)
return 0;
*flags = ntohl(nla_get_be32(attr));
if (*flags & ~(NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL))
return -EOPNOTSUPP;
if (!(set->flags & NFT_SET_INTERVAL) &&
*flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
if ((*flags & (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL)) ==
(NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL))
return -EINVAL;
return 0;
}
static int nft_setelem_parse_key(struct nft_ctx *ctx, struct nft_set *set,
struct nft_data *key, struct nlattr *attr)
{
struct nft_data_desc desc = {
.type = NFT_DATA_VALUE,
.size = NFT_DATA_VALUE_MAXLEN,
.len = set->klen,
};
return nft_data_init(ctx, key, &desc, attr);
}
static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
struct nft_data_desc *desc,
struct nft_data *data,
struct nlattr *attr)
{
u32 dtype;
if (set->dtype == NFT_DATA_VERDICT)
dtype = NFT_DATA_VERDICT;
else
dtype = NFT_DATA_VALUE;
desc->type = dtype;
desc->size = NFT_DATA_VALUE_MAXLEN;
desc->len = set->dlen;
desc->flags = NFT_DATA_DESC_SETELEM;
return nft_data_init(ctx, data, desc, attr);
}
static void *nft_setelem_catchall_get(const struct net *net,
const struct nft_set *set)
{
struct nft_set_elem_catchall *catchall;
u8 genmask = nft_genmask_cur(net);
struct nft_set_ext *ext;
void *priv = NULL;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask) ||
nft_set_elem_expired(ext))
continue;
priv = catchall->elem;
break;
}
return priv;
}
static int nft_setelem_get(struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_elem *elem, u32 flags)
{
void *priv;
if (!(flags & NFT_SET_ELEM_CATCHALL)) {
priv = set->ops->get(ctx->net, set, elem, flags);
if (IS_ERR(priv))
return PTR_ERR(priv);
} else {
priv = nft_setelem_catchall_get(ctx->net, set);
if (!priv)
return -ENOENT;
}
elem->priv = priv;
return 0;
}
static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, bool reset)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
struct nft_set_elem elem;
struct sk_buff *skb;
uint32_t flags = 0;
int err;
err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy, NULL);
if (err < 0)
return err;
err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
if (err < 0)
return err;
if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
return -EINVAL;
if (nla[NFTA_SET_ELEM_KEY]) {
err = nft_setelem_parse_key(ctx, set, &elem.key.val,
nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
return err;
}
if (nla[NFTA_SET_ELEM_KEY_END]) {
err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
nla[NFTA_SET_ELEM_KEY_END]);
if (err < 0)
return err;
}
err = nft_setelem_get(ctx, set, &elem, flags);
if (err < 0)
return err;
err = -ENOMEM;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
return err;
err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
NFT_MSG_NEWSETELEM, 0, set, &elem,
reset);
if (err < 0)
goto err_fill_setelem;
return nfnetlink_unicast(skb, ctx->net, ctx->portid);
err_fill_setelem:
kfree_skb(skb);
return err;
}
/* called with rcu_read_lock held */
static int nf_tables_getsetelem(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
int rem, err = 0, nelems = 0;
struct net *net = info->net;
struct nft_table *table;
struct nft_set *set;
struct nlattr *attr;
struct nft_ctx ctx;
bool reset = false;
table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
return PTR_ERR(table);
}
set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nf_tables_dump_set_start,
.dump = nf_tables_dump_set,
.done = nf_tables_dump_set_done,
.module = THIS_MODULE,
};
struct nft_set_dump_ctx dump_ctx = {
.set = set,
.ctx = ctx,
};
c.data = &dump_ctx;
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
if (!nla[NFTA_SET_ELEM_LIST_ELEMENTS])
return -EINVAL;
if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETSETELEM_RESET)
reset = true;
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_get_set_elem(&ctx, set, attr, reset);
if (err < 0) {
NL_SET_BAD_ATTR(extack, attr);
break;
}
nelems++;
}
if (reset)
audit_log_nft_set_reset(table, nft_pernet(net)->base_seq,
nelems);
return err;
}
static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
const struct nft_set *set,
const struct nft_set_elem *elem,
int event)
{
struct nftables_pernet *nft_net;
struct net *net = ctx->net;
u32 portid = ctx->portid;
struct sk_buff *skb;
u16 flags = 0;
int err;
if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
return;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
set, elem, false);
if (err < 0) {
kfree_skb(skb);
goto err;
}
nft_net = nft_pernet(net);
nft_notify_enqueue(skb, ctx->report, &nft_net->notify_list);
return;
err:
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
int msg_type,
struct nft_set *set)
{
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem));
if (trans == NULL)
return NULL;
nft_trans_elem_set(trans) = set;
return trans;
}
struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
const struct nft_set *set,
const struct nlattr *attr)
{
struct nft_expr *expr;
int err;
expr = nft_expr_init(ctx, attr);
if (IS_ERR(expr))
return expr;
err = -EOPNOTSUPP;
if (expr->ops->type->flags & NFT_EXPR_GC) {
if (set->flags & NFT_SET_TIMEOUT)
goto err_set_elem_expr;
if (!set->ops->gc_init)
goto err_set_elem_expr;
set->ops->gc_init(set);
}
return expr;
err_set_elem_expr:
nft_expr_destroy(ctx, expr);
return ERR_PTR(err);
}
static int nft_set_ext_check(const struct nft_set_ext_tmpl *tmpl, u8 id, u32 len)
{
len += nft_set_ext_types[id].len;
if (len > tmpl->ext_len[id] ||
len > U8_MAX)
return -1;
return 0;
}
static int nft_set_ext_memcpy(const struct nft_set_ext_tmpl *tmpl, u8 id,
void *to, const void *from, u32 len)
{
if (nft_set_ext_check(tmpl, id, len) < 0)
return -1;
memcpy(to, from, len);
return 0;
}
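/* Allocate a set element with its extension area laid out according to the
 * template and populate key, end key, data, timeout and expiration.
 */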
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *key_end,
const u32 *data, u64 timeout, u64 expiration, gfp_t gfp)
{
struct nft_set_ext *ext;
void *elem;
elem = kzalloc(set->ops->elemsize + tmpl->len, gfp);
if (elem == NULL)
return ERR_PTR(-ENOMEM);
ext = nft_set_elem_ext(set, elem);
nft_set_ext_init(ext, tmpl);
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY) &&
nft_set_ext_memcpy(tmpl, NFT_SET_EXT_KEY,
nft_set_ext_key(ext), key, set->klen) < 0)
goto err_ext_check;
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END) &&
nft_set_ext_memcpy(tmpl, NFT_SET_EXT_KEY_END,
nft_set_ext_key_end(ext), key_end, set->klen) < 0)
goto err_ext_check;
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_set_ext_memcpy(tmpl, NFT_SET_EXT_DATA,
nft_set_ext_data(ext), data, set->dlen) < 0)
goto err_ext_check;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
*nft_set_ext_expiration(ext) = get_jiffies_64() + expiration;
if (expiration == 0)
*nft_set_ext_expiration(ext) += timeout;
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
*nft_set_ext_timeout(ext) = timeout;
return elem;
err_ext_check:
kfree(elem);
return ERR_PTR(-EINVAL);
}
static void __nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
struct nft_expr *expr)
{
if (expr->ops->destroy_clone) {
expr->ops->destroy_clone(ctx, expr);
module_put(expr->ops->type->owner);
} else {
nf_tables_expr_destroy(ctx, expr);
}
}
static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
struct nft_set_elem_expr *elem_expr)
{
struct nft_expr *expr;
u32 size;
nft_setelem_expr_foreach(expr, elem_expr, size)
__nft_set_elem_expr_destroy(ctx, expr);
}
/* Drop references and destroy. Called from gc, dynset and abort path. */
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr)
{
struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
struct nft_ctx ctx = {
.net = read_pnet(&set->net),
.family = set->table->family,
};
nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_release(nft_set_ext_data(ext), set->dtype);
if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS))
nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext));
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
nft_use_dec(&(*nft_set_ext_obj(ext))->use);
kfree(elem);
}
EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
/* Destroy element. References have already been dropped in the preparation
 * path via nft_setelem_data_deactivate().
 */
void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
const struct nft_set *set, void *elem)
{
struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS))
nft_set_elem_expr_destroy(ctx, nft_set_ext_expr(ext));
kfree(elem);
}
int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_expr *expr_array[])
{
struct nft_expr *expr;
int err, i, k;
for (i = 0; i < set->num_exprs; i++) {
expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL_ACCOUNT);
if (!expr)
goto err_expr;
err = nft_expr_clone(expr, set->exprs[i]);
if (err < 0) {
kfree(expr);
goto err_expr;
}
expr_array[i] = expr;
}
return 0;
err_expr:
for (k = i - 1; k >= 0; k--)
nft_expr_destroy(ctx, expr_array[k]);
return -ENOMEM;
}
static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
const struct nft_set_ext_tmpl *tmpl,
const struct nft_set_ext *ext,
struct nft_expr *expr_array[],
u32 num_exprs)
{
struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
u32 len = sizeof(struct nft_set_elem_expr);
struct nft_expr *expr;
int i, err;
if (num_exprs == 0)
return 0;
for (i = 0; i < num_exprs; i++)
len += expr_array[i]->ops->size;
if (nft_set_ext_check(tmpl, NFT_SET_EXT_EXPRESSIONS, len) < 0)
return -EINVAL;
for (i = 0; i < num_exprs; i++) {
expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
err = nft_expr_clone(expr, expr_array[i]);
if (err < 0)
goto err_elem_expr_setup;
elem_expr->size += expr_array[i]->ops->size;
nft_expr_destroy(ctx, expr_array[i]);
expr_array[i] = NULL;
}
return 0;
err_elem_expr_setup:
for (; i < num_exprs; i++) {
nft_expr_destroy(ctx, expr_array[i]);
expr_array[i] = NULL;
}
return -ENOMEM;
}
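/* Return the extension of the active, non-expired catchall element in the
 * current generation, if any; catchall elements match any lookup key.
 */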
struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
const struct nft_set *set)
{
struct nft_set_elem_catchall *catchall;
u8 genmask = nft_genmask_cur(net);
struct nft_set_ext *ext;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (nft_set_elem_active(ext, genmask) &&
!nft_set_elem_expired(ext) &&
!nft_set_elem_is_dead(ext))
return ext;
}
return NULL;
}
EXPORT_SYMBOL_GPL(nft_set_catchall_lookup);
static int nft_setelem_catchall_insert(const struct net *net,
struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **pext)
{
struct nft_set_elem_catchall *catchall;
u8 genmask = nft_genmask_next(net);
struct nft_set_ext *ext;
list_for_each_entry(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (nft_set_elem_active(ext, genmask)) {
*pext = ext;
return -EEXIST;
}
}
catchall = kmalloc(sizeof(*catchall), GFP_KERNEL);
if (!catchall)
return -ENOMEM;
catchall->elem = elem->priv;
list_add_tail_rcu(&catchall->list, &set->catchall_list);
return 0;
}
static int nft_setelem_insert(const struct net *net,
struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **ext, unsigned int flags)
{
int ret;
if (flags & NFT_SET_ELEM_CATCHALL)
ret = nft_setelem_catchall_insert(net, set, elem, ext);
else
ret = set->ops->insert(net, set, elem, ext);
return ret;
}
static bool nft_setelem_is_catchall(const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
*nft_set_ext_flags(ext) & NFT_SET_ELEM_CATCHALL)
return true;
return false;
}
static void nft_setelem_activate(struct net *net, struct nft_set *set,
struct nft_set_elem *elem)
{
struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
if (nft_setelem_is_catchall(set, elem)) {
nft_set_elem_change_active(net, set, ext);
} else {
set->ops->activate(net, set, elem);
}
}
static int nft_setelem_catchall_deactivate(const struct net *net,
struct nft_set *set,
struct nft_set_elem *elem)
{
struct nft_set_elem_catchall *catchall;
struct nft_set_ext *ext;
list_for_each_entry(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_is_active(net, ext))
continue;
kfree(elem->priv);
elem->priv = catchall->elem;
nft_set_elem_change_active(net, set, ext);
return 0;
}
return -ENOENT;
}
static int __nft_setelem_deactivate(const struct net *net,
struct nft_set *set,
struct nft_set_elem *elem)
{
void *priv;
priv = set->ops->deactivate(net, set, elem);
if (!priv)
return -ENOENT;
kfree(elem->priv);
elem->priv = priv;
set->ndeact++;
return 0;
}
static int nft_setelem_deactivate(const struct net *net,
struct nft_set *set,
struct nft_set_elem *elem, u32 flags)
{
int ret;
if (flags & NFT_SET_ELEM_CATCHALL)
ret = nft_setelem_catchall_deactivate(net, set, elem);
else
ret = __nft_setelem_deactivate(net, set, elem);
return ret;
}
static void nft_setelem_catchall_remove(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_set_elem_catchall *catchall, *next;
list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
if (catchall->elem == elem->priv) {
list_del_rcu(&catchall->list);
kfree_rcu(catchall, rcu);
break;
}
}
}
static void nft_setelem_remove(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
if (nft_setelem_is_catchall(set, elem))
nft_setelem_catchall_remove(net, set, elem);
else
set->ops->remove(net, set, elem);
}
static bool nft_setelem_valid_key_end(const struct nft_set *set,
struct nlattr **nla, u32 flags)
{
if ((set->flags & (NFT_SET_CONCAT | NFT_SET_INTERVAL)) ==
(NFT_SET_CONCAT | NFT_SET_INTERVAL)) {
if (flags & NFT_SET_ELEM_INTERVAL_END)
return false;
if (nla[NFTA_SET_ELEM_KEY_END] &&
flags & NFT_SET_ELEM_CATCHALL)
return false;
} else {
if (nla[NFTA_SET_ELEM_KEY_END])
return false;
}
return true;
}
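/* Parse a single element of a NEWSETELEM request, build its extension
 * template, allocate and insert it, and queue the transaction. A clash with
 * an identical element is tolerated unless NLM_F_EXCL was given.
 */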
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, u32 nlmsg_flags)
{
struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {};
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
u8 genmask = nft_genmask_next(ctx->net);
u32 flags = 0, size = 0, num_exprs = 0;
struct nft_set_ext_tmpl tmpl;
struct nft_set_ext *ext, *ext2;
struct nft_set_elem elem;
struct nft_set_binding *binding;
struct nft_object *obj = NULL;
struct nft_userdata *udata;
struct nft_data_desc desc;
enum nft_registers dreg;
struct nft_trans *trans;
u64 timeout;
u64 expiration;
int err, i;
u8 ulen;
err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy, NULL);
if (err < 0)
return err;
nft_set_ext_prepare(&tmpl);
err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
if (err < 0)
return err;
if (((flags & NFT_SET_ELEM_CATCHALL) && nla[NFTA_SET_ELEM_KEY]) ||
(!(flags & NFT_SET_ELEM_CATCHALL) && !nla[NFTA_SET_ELEM_KEY]))
return -EINVAL;
if (flags != 0) {
err = nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
if (err < 0)
return err;
}
if (set->flags & NFT_SET_MAP) {
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
!(flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
return -EINVAL;
}
if (set->flags & NFT_SET_OBJECT) {
if (!nla[NFTA_SET_ELEM_OBJREF] &&
!(flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_OBJREF])
return -EINVAL;
}
if (!nft_setelem_valid_key_end(set, nla, flags))
return -EINVAL;
if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
(nla[NFTA_SET_ELEM_DATA] ||
nla[NFTA_SET_ELEM_OBJREF] ||
nla[NFTA_SET_ELEM_TIMEOUT] ||
nla[NFTA_SET_ELEM_EXPIRATION] ||
nla[NFTA_SET_ELEM_USERDATA] ||
nla[NFTA_SET_ELEM_EXPR] ||
nla[NFTA_SET_ELEM_KEY_END] ||
nla[NFTA_SET_ELEM_EXPRESSIONS]))
return -EINVAL;
timeout = 0;
if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_TIMEOUT],
&timeout);
if (err)
return err;
} else if (set->flags & NFT_SET_TIMEOUT &&
!(flags & NFT_SET_ELEM_INTERVAL_END)) {
timeout = READ_ONCE(set->timeout);
}
expiration = 0;
if (nla[NFTA_SET_ELEM_EXPIRATION] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_EXPIRATION],
&expiration);
if (err)
return err;
}
if (nla[NFTA_SET_ELEM_EXPR]) {
struct nft_expr *expr;
if (set->num_exprs && set->num_exprs != 1)
return -EOPNOTSUPP;
expr = nft_set_elem_expr_alloc(ctx, set,
nla[NFTA_SET_ELEM_EXPR]);
if (IS_ERR(expr))
return PTR_ERR(expr);
expr_array[0] = expr;
num_exprs = 1;
if (set->num_exprs && set->exprs[0]->ops != expr->ops) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
} else if (nla[NFTA_SET_ELEM_EXPRESSIONS]) {
struct nft_expr *expr;
struct nlattr *tmp;
int left;
i = 0;
nla_for_each_nested(tmp, nla[NFTA_SET_ELEM_EXPRESSIONS], left) {
if (i == NFT_SET_EXPR_MAX ||
(set->num_exprs && set->num_exprs == i)) {
err = -E2BIG;
goto err_set_elem_expr;
}
if (nla_type(tmp) != NFTA_LIST_ELEM) {
err = -EINVAL;
goto err_set_elem_expr;
}
expr = nft_set_elem_expr_alloc(ctx, set, tmp);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
goto err_set_elem_expr;
}
expr_array[i] = expr;
num_exprs++;
if (set->num_exprs && expr->ops != set->exprs[i]->ops) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
i++;
}
if (set->num_exprs && set->num_exprs != i) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
} else if (set->num_exprs > 0 &&
!(flags & NFT_SET_ELEM_INTERVAL_END)) {
err = nft_set_elem_expr_clone(ctx, set, expr_array);
if (err < 0)
goto err_set_elem_expr_clone;
num_exprs = set->num_exprs;
}
if (nla[NFTA_SET_ELEM_KEY]) {
err = nft_setelem_parse_key(ctx, set, &elem.key.val,
nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
goto err_set_elem_expr;
err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
if (err < 0)
goto err_parse_key;
}
if (nla[NFTA_SET_ELEM_KEY_END]) {
err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
nla[NFTA_SET_ELEM_KEY_END]);
if (err < 0)
goto err_parse_key;
err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
if (err < 0)
goto err_parse_key_end;
}
if (timeout > 0) {
err = nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
if (err < 0)
goto err_parse_key_end;
if (timeout != READ_ONCE(set->timeout)) {
err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
if (err < 0)
goto err_parse_key_end;
}
}
if (num_exprs) {
for (i = 0; i < num_exprs; i++)
size += expr_array[i]->ops->size;
err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,
sizeof(struct nft_set_elem_expr) + size);
if (err < 0)
goto err_parse_key_end;
}
if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
obj = nft_obj_lookup(ctx->net, ctx->table,
nla[NFTA_SET_ELEM_OBJREF],
set->objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
obj = NULL;
goto err_parse_key_end;
}
if (!nft_use_inc(&obj->use)) {
err = -EMFILE;
obj = NULL;
goto err_parse_key_end;
}
err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
if (err < 0)
goto err_parse_key_end;
}
if (nla[NFTA_SET_ELEM_DATA] != NULL) {
err = nft_setelem_parse_data(ctx, set, &desc, &elem.data.val,
nla[NFTA_SET_ELEM_DATA]);
if (err < 0)
goto err_parse_key_end;
dreg = nft_type_to_reg(set->dtype);
list_for_each_entry(binding, &set->bindings, list) {
struct nft_ctx bind_ctx = {
.net = ctx->net,
.family = ctx->family,
.table = ctx->table,
.chain = (struct nft_chain *)binding->chain,
};
if (!(binding->flags & NFT_SET_MAP))
continue;
err = nft_validate_register_store(&bind_ctx, dreg,
&elem.data.val,
desc.type, desc.len);
if (err < 0)
goto err_parse_data;
if (desc.type == NFT_DATA_VERDICT &&
(elem.data.val.verdict.code == NFT_GOTO ||
elem.data.val.verdict.code == NFT_JUMP))
nft_validate_state_update(ctx->table,
NFT_VALIDATE_NEED);
}
err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
if (err < 0)
goto err_parse_data;
}
	/* The full maximum length of userdata can exceed the maximum
	 * offset value (U8_MAX) for following extensions, therefore it
	 * must be the last extension added.
	 */
ulen = 0;
if (nla[NFTA_SET_ELEM_USERDATA] != NULL) {
ulen = nla_len(nla[NFTA_SET_ELEM_USERDATA]);
if (ulen > 0) {
err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA,
ulen);
if (err < 0)
goto err_parse_data;
}
}
elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
elem.key_end.val.data, elem.data.val.data,
timeout, expiration, GFP_KERNEL_ACCOUNT);
if (IS_ERR(elem.priv)) {
err = PTR_ERR(elem.priv);
goto err_parse_data;
}
ext = nft_set_elem_ext(set, elem.priv);
if (flags)
*nft_set_ext_flags(ext) = flags;
if (obj)
*nft_set_ext_obj(ext) = obj;
if (ulen > 0) {
if (nft_set_ext_check(&tmpl, NFT_SET_EXT_USERDATA, ulen) < 0) {
err = -EINVAL;
goto err_elem_free;
}
udata = nft_set_ext_userdata(ext);
udata->len = ulen - 1;
nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
}
err = nft_set_elem_expr_setup(ctx, &tmpl, ext, expr_array, num_exprs);
if (err < 0)
goto err_elem_free;
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL) {
err = -ENOMEM;
goto err_elem_free;
}
ext->genmask = nft_genmask_cur(ctx->net);
err = nft_setelem_insert(ctx->net, set, &elem, &ext2, flags);
if (err) {
if (err == -EEXIST) {
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^
nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) ||
nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF))
goto err_element_clash;
if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
memcmp(nft_set_ext_data(ext),
nft_set_ext_data(ext2), set->dlen) != 0) ||
(nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) &&
nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF) &&
*nft_set_ext_obj(ext) != *nft_set_ext_obj(ext2)))
goto err_element_clash;
else if (!(nlmsg_flags & NLM_F_EXCL))
err = 0;
} else if (err == -ENOTEMPTY) {
			/* ENOTEMPTY reports an overlap between this element
			 * and an existing one.
			 */
err = -EEXIST;
}
goto err_element_clash;
}
if (!(flags & NFT_SET_ELEM_CATCHALL)) {
unsigned int max = set->size ? set->size + set->ndeact : UINT_MAX;
if (!atomic_add_unless(&set->nelems, 1, max)) {
err = -ENFILE;
goto err_set_full;
}
}
nft_trans_elem(trans) = elem;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err_set_full:
nft_setelem_remove(ctx->net, set, &elem);
err_element_clash:
kfree(trans);
err_elem_free:
nf_tables_set_elem_destroy(ctx, set, elem.priv);
err_parse_data:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
nft_data_release(&elem.data.val, desc.type);
err_parse_key_end:
if (obj)
nft_use_dec_restore(&obj->use);
nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
err_parse_key:
nft_data_release(&elem.key.val, NFT_DATA_VALUE);
err_set_elem_expr:
for (i = 0; i < num_exprs && expr_array[i]; i++)
nft_expr_destroy(ctx, expr_array[i]);
err_set_elem_expr_clone:
return err;
}
static int nf_tables_newsetelem(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
int rem, err;
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
return -EINVAL;
table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
genmask, NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
return PTR_ERR(table);
}
set = nft_set_lookup_global(net, table, nla[NFTA_SET_ELEM_LIST_SET],
nla[NFTA_SET_ELEM_LIST_SET_ID], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
if (!list_empty(&set->bindings) &&
(set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
return -EBUSY;
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags);
if (err < 0) {
NL_SET_BAD_ATTR(extack, attr);
return err;
}
}
if (table->validate_state == NFT_VALIDATE_DO)
return nft_table_validate(net, table);
return 0;
}
/**
* nft_data_hold - hold a nft_data item
*
 * @data: struct nft_data to hold
* @type: type of data
*
 * Hold an nft_data item. NFT_DATA_VALUE types can be silently discarded;
 * NFT_DATA_VERDICT bumps the reference on the chain for NFT_JUMP and
 * NFT_GOTO verdicts. This function must be called on active data objects
* from the second phase of the commit protocol.
*/
void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
{
struct nft_chain *chain;
if (type == NFT_DATA_VERDICT) {
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
chain = data->verdict.chain;
nft_use_inc_restore(&chain->use);
break;
}
}
}
static void nft_setelem_data_activate(const struct net *net,
const struct nft_set *set,
struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_hold(nft_set_ext_data(ext), set->dtype);
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use);
}
void nft_setelem_data_deactivate(const struct net *net,
const struct nft_set *set,
struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_release(nft_set_ext_data(ext), set->dtype);
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
nft_use_dec(&(*nft_set_ext_obj(ext))->use);
}
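/* Parse a single element of a DELSETELEM request, deactivate it in the next
 * generation, drop its data/object references and queue the deletion.
 */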
static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
struct nft_set_ext_tmpl tmpl;
struct nft_set_elem elem;
struct nft_set_ext *ext;
struct nft_trans *trans;
u32 flags = 0;
int err;
err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy, NULL);
if (err < 0)
return err;
err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
if (err < 0)
return err;
if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
return -EINVAL;
if (!nft_setelem_valid_key_end(set, nla, flags))
return -EINVAL;
nft_set_ext_prepare(&tmpl);
if (flags != 0) {
err = nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
if (err < 0)
return err;
}
if (nla[NFTA_SET_ELEM_KEY]) {
err = nft_setelem_parse_key(ctx, set, &elem.key.val,
nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
return err;
err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
if (err < 0)
goto fail_elem;
}
if (nla[NFTA_SET_ELEM_KEY_END]) {
err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
nla[NFTA_SET_ELEM_KEY_END]);
if (err < 0)
goto fail_elem;
err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
if (err < 0)
goto fail_elem_key_end;
}
err = -ENOMEM;
elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
elem.key_end.val.data, NULL, 0, 0,
GFP_KERNEL_ACCOUNT);
if (IS_ERR(elem.priv)) {
err = PTR_ERR(elem.priv);
goto fail_elem_key_end;
}
ext = nft_set_elem_ext(set, elem.priv);
if (flags)
*nft_set_ext_flags(ext) = flags;
trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
if (trans == NULL)
goto fail_trans;
err = nft_setelem_deactivate(ctx->net, set, &elem, flags);
if (err < 0)
goto fail_ops;
nft_setelem_data_deactivate(ctx->net, set, &elem);
nft_trans_elem(trans) = elem;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
fail_ops:
kfree(trans);
fail_trans:
kfree(elem.priv);
fail_elem_key_end:
nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
fail_elem:
nft_data_release(&elem.key.val, NFT_DATA_VALUE);
return err;
}
static int nft_setelem_flush(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
struct nft_set_elem *elem)
{
struct nft_trans *trans;
int err;
trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
sizeof(struct nft_trans_elem), GFP_ATOMIC);
if (!trans)
return -ENOMEM;
if (!set->ops->flush(ctx->net, set, elem->priv)) {
err = -ENOENT;
goto err1;
}
set->ndeact++;
nft_setelem_data_deactivate(ctx->net, set, elem);
nft_trans_elem_set(trans) = set;
nft_trans_elem(trans) = *elem;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err1:
kfree(trans);
return err;
}
static int __nft_set_catchall_flush(const struct nft_ctx *ctx,
struct nft_set *set,
struct nft_set_elem *elem)
{
struct nft_trans *trans;
trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
sizeof(struct nft_trans_elem), GFP_KERNEL);
if (!trans)
return -ENOMEM;
nft_setelem_data_deactivate(ctx->net, set, elem);
nft_trans_elem_set(trans) = set;
nft_trans_elem(trans) = *elem;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
static int nft_set_catchall_flush(const struct nft_ctx *ctx,
struct nft_set *set)
{
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set_elem_catchall *catchall;
struct nft_set_elem elem;
struct nft_set_ext *ext;
int ret = 0;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask))
continue;
elem.priv = catchall->elem;
ret = __nft_set_catchall_flush(ctx, set, &elem);
if (ret < 0)
break;
nft_set_elem_change_active(ctx->net, set, ext);
}
return ret;
}
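/* Flush all elements of a set: walk the regular elements and the catchall
 * list, queueing one DELSETELEM transaction per element.
 */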
static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
{
struct nft_set_iter iter = {
.genmask = genmask,
.fn = nft_setelem_flush,
};
set->ops->walk(ctx, set, &iter);
if (!iter.err)
iter.err = nft_set_catchall_flush(ctx, set);
return iter.err;
}
static int nf_tables_delsetelem(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
int rem, err = 0;
table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
genmask, NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
return PTR_ERR(table);
}
set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
if (nft_set_is_anonymous(set))
return -EOPNOTSUPP;
if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT))
return -EBUSY;
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
if (!nla[NFTA_SET_ELEM_LIST_ELEMENTS])
return nft_set_flush(&ctx, set, genmask);
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_del_setelem(&ctx, set, attr);
if (err == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYSETELEM)
continue;
if (err < 0) {
NL_SET_BAD_ATTR(extack, attr);
break;
}
}
return err;
}
/*
* Stateful objects
*/
/**
 * nft_register_obj - register nf_tables stateful object type
* @obj_type: object type
*
* Registers the object type for use with nf_tables. Returns zero on
* success or a negative errno code otherwise.
*/
int nft_register_obj(struct nft_object_type *obj_type)
{
if (obj_type->type == NFT_OBJECT_UNSPEC)
return -EINVAL;
nfnl_lock(NFNL_SUBSYS_NFTABLES);
list_add_rcu(&obj_type->list, &nf_tables_objects);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
EXPORT_SYMBOL_GPL(nft_register_obj);
/**
* nft_unregister_obj - unregister nf_tables object type
* @obj_type: object type
*
* Unregisters the object type for use with nf_tables.
*/
void nft_unregister_obj(struct nft_object_type *obj_type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
list_del_rcu(&obj_type->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_obj);
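/* Look up a stateful object by (table, name) in the nft_objname_ht rhltable,
 * filtering on object type and generation mask. Callers must hold either the
 * RCU read lock or the commit mutex.
 */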
struct nft_object *nft_obj_lookup(const struct net *net,
const struct nft_table *table,
const struct nlattr *nla, u32 objtype,
u8 genmask)
{
struct nft_object_hash_key k = { .table = table };
char search[NFT_OBJ_MAXNAMELEN];
struct rhlist_head *tmp, *list;
struct nft_object *obj;
nla_strscpy(search, nla, sizeof(search));
k.name = search;
WARN_ON_ONCE(!rcu_read_lock_held() &&
!lockdep_commit_lock_is_held(net));
rcu_read_lock();
list = rhltable_lookup(&nft_objname_ht, &k, nft_objname_ht_params);
if (!list)
goto out;
rhl_for_each_entry_rcu(obj, tmp, list, rhlhead) {
if (objtype == obj->ops->type->type &&
nft_active_genmask(obj, genmask)) {
rcu_read_unlock();
return obj;
}
}
out:
rcu_read_unlock();
return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(nft_obj_lookup);
static struct nft_object *nft_obj_lookup_byhandle(const struct nft_table *table,
const struct nlattr *nla,
u32 objtype, u8 genmask)
{
struct nft_object *obj;
list_for_each_entry(obj, &table->objects, list) {
if (be64_to_cpu(nla_get_be64(nla)) == obj->handle &&
objtype == obj->ops->type->type &&
nft_active_genmask(obj, genmask))
return obj;
}
return ERR_PTR(-ENOENT);
}
static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
[NFTA_OBJ_TABLE] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_OBJ_NAME] = { .type = NLA_STRING,
.len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_OBJ_DATA] = { .type = NLA_NESTED },
[NFTA_OBJ_HANDLE] = { .type = NLA_U64},
[NFTA_OBJ_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
};
static struct nft_object *nft_obj_init(const struct nft_ctx *ctx,
const struct nft_object_type *type,
const struct nlattr *attr)
{
struct nlattr **tb;
const struct nft_object_ops *ops;
struct nft_object *obj;
int err = -ENOMEM;
tb = kmalloc_array(type->maxattr + 1, sizeof(*tb), GFP_KERNEL);
if (!tb)
goto err1;
if (attr) {
err = nla_parse_nested_deprecated(tb, type->maxattr, attr,
type->policy, NULL);
if (err < 0)
goto err2;
} else {
memset(tb, 0, sizeof(tb[0]) * (type->maxattr + 1));
}
if (type->select_ops) {
ops = type->select_ops(ctx, (const struct nlattr * const *)tb);
if (IS_ERR(ops)) {
err = PTR_ERR(ops);
goto err2;
}
} else {
ops = type->ops;
}
err = -ENOMEM;
obj = kzalloc(sizeof(*obj) + ops->size, GFP_KERNEL_ACCOUNT);
if (!obj)
goto err2;
err = ops->init(ctx, (const struct nlattr * const *)tb, obj);
if (err < 0)
goto err3;
obj->ops = ops;
kfree(tb);
return obj;
err3:
kfree(obj);
err2:
kfree(tb);
err1:
return ERR_PTR(err);
}
static int nft_object_dump(struct sk_buff *skb, unsigned int attr,
struct nft_object *obj, bool reset)
{
struct nlattr *nest;
nest = nla_nest_start_noflag(skb, attr);
if (!nest)
goto nla_put_failure;
if (obj->ops->dump(skb, obj, reset) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
return 0;
nla_put_failure:
return -1;
}
static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
{
const struct nft_object_type *type;
list_for_each_entry(type, &nf_tables_objects, list) {
if (objtype == type->type)
return type;
}
return NULL;
}
static const struct nft_object_type *
nft_obj_type_get(struct net *net, u32 objtype)
{
const struct nft_object_type *type;
type = __nft_obj_type_get(objtype);
if (type != NULL && try_module_get(type->owner))
return type;
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
return ERR_PTR(-ENOENT);
}
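/* Update an existing stateful object: parse the new attributes into a
 * temporary object and queue a NEWOBJ update transaction carrying it.
 */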
static int nf_tables_updobj(const struct nft_ctx *ctx,
const struct nft_object_type *type,
const struct nlattr *attr,
struct nft_object *obj)
{
struct nft_object *newobj;
struct nft_trans *trans;
int err = -ENOMEM;
if (!try_module_get(type->owner))
return -ENOENT;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
sizeof(struct nft_trans_obj));
if (!trans)
goto err_trans;
newobj = nft_obj_init(ctx, type, attr);
if (IS_ERR(newobj)) {
err = PTR_ERR(newobj);
goto err_free_trans;
}
nft_trans_obj(trans) = obj;
nft_trans_obj_update(trans) = true;
nft_trans_obj_newobj(trans) = newobj;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err_free_trans:
kfree(trans);
err_trans:
module_put(type->owner);
return err;
}
static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
const struct nft_object_type *type;
struct net *net = info->net;
struct nft_table *table;
struct nft_object *obj;
struct nft_ctx ctx;
u32 objtype;
int err;
if (!nla[NFTA_OBJ_TYPE] ||
!nla[NFTA_OBJ_NAME] ||
!nla[NFTA_OBJ_DATA])
return -EINVAL;
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
return PTR_ERR(table);
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
if (err != -ENOENT) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
return err;
}
} else {
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
return -EEXIST;
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
type = __nft_obj_type_get(objtype);
if (WARN_ON_ONCE(!type))
return -ENOENT;
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
if (!nft_use_inc(&table->use))
return -EMFILE;
type = nft_obj_type_get(net, objtype);
if (IS_ERR(type)) {
err = PTR_ERR(type);
goto err_type;
}
obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_init;
}
obj->key.table = table;
obj->handle = nf_tables_alloc_handle(table);
obj->key.name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL_ACCOUNT);
if (!obj->key.name) {
err = -ENOMEM;
goto err_strdup;
}
if (nla[NFTA_OBJ_USERDATA]) {
obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL_ACCOUNT);
		if (obj->udata == NULL) {
			err = -ENOMEM;
			goto err_userdata;
		}
obj->udlen = nla_len(nla[NFTA_OBJ_USERDATA]);
}
err = nft_trans_obj_add(&ctx, NFT_MSG_NEWOBJ, obj);
if (err < 0)
goto err_trans;
err = rhltable_insert(&nft_objname_ht, &obj->rhlhead,
nft_objname_ht_params);
if (err < 0)
goto err_obj_ht;
list_add_tail_rcu(&obj->list, &table->objects);
return 0;
err_obj_ht:
/* queued in transaction log */
INIT_LIST_HEAD(&obj->list);
return err;
err_trans:
kfree(obj->udata);
err_userdata:
kfree(obj->key.name);
err_strdup:
if (obj->ops->destroy)
obj->ops->destroy(&ctx, obj);
kfree(obj);
err_init:
module_put(type->owner);
err_type:
nft_use_dec_restore(&table->use);
return err;
}
static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, int event, u32 flags,
int family, const struct nft_table *table,
struct nft_object *obj, bool reset)
{
struct nlmsghdr *nlh;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
NFNETLINK_V0, nft_base_seq(net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle),
NFTA_OBJ_PAD))
goto nla_put_failure;
if (event == NFT_MSG_DELOBJ) {
nlmsg_end(skb, nlh);
return 0;
}
if (nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
goto nla_put_failure;
if (obj->udata &&
nla_put(skb, NFTA_OBJ_USERDATA, obj->udlen, obj->udata))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
return -1;
}
struct nft_obj_filter {
char *table;
u32 type;
};
static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
const struct nft_table *table;
unsigned int idx = 0, s_idx = cb->args[0];
struct nft_obj_filter *filter = cb->data;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
struct nft_object *obj;
bool reset = false;
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
reset = true;
rcu_read_lock();
nft_net = nft_pernet(net);
cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
list_for_each_entry_rcu(obj, &table->objects, list) {
if (!nft_is_active(net, obj))
goto cont;
if (idx < s_idx)
goto cont;
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
if (filter && filter->table &&
strcmp(filter->table, table->name))
goto cont;
if (filter &&
filter->type != NFT_OBJECT_UNSPEC &&
obj->ops->type->type != filter->type)
goto cont;
if (reset) {
char *buf = kasprintf(GFP_ATOMIC,
"%s:%u",
table->name,
nft_net->base_seq);
audit_log_nfcfg(buf,
family,
obj->handle,
AUDIT_NFT_OP_OBJ_RESET,
GFP_ATOMIC);
kfree(buf);
}
if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWOBJ,
NLM_F_MULTI | NLM_F_APPEND,
table->family, table,
obj, reset) < 0)
goto done;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
done:
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
static int nf_tables_dump_obj_start(struct netlink_callback *cb)
{
const struct nlattr * const *nla = cb->data;
struct nft_obj_filter *filter = NULL;
if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
if (!filter)
return -ENOMEM;
if (nla[NFTA_OBJ_TABLE]) {
filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
if (!filter->table) {
kfree(filter);
return -ENOMEM;
}
}
if (nla[NFTA_OBJ_TYPE])
filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
}
cb->data = filter;
return 0;
}
static int nf_tables_dump_obj_done(struct netlink_callback *cb)
{
struct nft_obj_filter *filter = cb->data;
if (filter) {
kfree(filter->table);
kfree(filter);
}
return 0;
}
/* called with rcu_read_lock held */
static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
const struct nft_table *table;
struct net *net = info->net;
struct nft_object *obj;
struct sk_buff *skb2;
bool reset = false;
u32 objtype;
int err;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nf_tables_dump_obj_start,
.dump = nf_tables_dump_obj,
.done = nf_tables_dump_obj_done,
.module = THIS_MODULE,
.data = (void *)nla,
};
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
if (!nla[NFTA_OBJ_NAME] ||
!nla[NFTA_OBJ_TYPE])
return -EINVAL;
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
return PTR_ERR(table);
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
return PTR_ERR(obj);
}
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
return -ENOMEM;
if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
reset = true;
if (reset) {
const struct nftables_pernet *nft_net;
char *buf;
nft_net = nft_pernet(net);
buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, nft_net->base_seq);
audit_log_nfcfg(buf,
family,
obj->handle,
AUDIT_NFT_OP_OBJ_RESET,
GFP_ATOMIC);
kfree(buf);
}
err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
family, table, obj, reset);
if (err < 0)
goto err_fill_obj_info;
return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
err_fill_obj_info:
kfree_skb(skb2);
return err;
}
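/* Release a stateful object: invoke the type's destructor if there is one,
 * drop the module reference on the object type and free the object's name,
 * user data and the object itself.
 */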
static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
{
if (obj->ops->destroy)
obj->ops->destroy(ctx, obj);
module_put(obj->ops->type->owner);
kfree(obj->key.name);
kfree(obj->udata);
kfree(obj);
}
static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
struct nft_object *obj;
struct nft_ctx ctx;
u32 objtype;
if (!nla[NFTA_OBJ_TYPE] ||
(!nla[NFTA_OBJ_NAME] && !nla[NFTA_OBJ_HANDLE]))
return -EINVAL;
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
return PTR_ERR(table);
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
if (nla[NFTA_OBJ_HANDLE]) {
attr = nla[NFTA_OBJ_HANDLE];
obj = nft_obj_lookup_byhandle(table, attr, objtype, genmask);
} else {
attr = nla[NFTA_OBJ_NAME];
obj = nft_obj_lookup(net, table, attr, objtype, genmask);
}
if (IS_ERR(obj)) {
if (PTR_ERR(obj) == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYOBJ)
return 0;
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(obj);
}
if (obj->use > 0) {
NL_SET_BAD_ATTR(extack, attr);
return -EBUSY;
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
return nft_delobj(&ctx, obj);
}
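/* Log an audit record for the object (un)registration and, when a report was
 * requested or NFNLGRP_NFTABLES has listeners, queue the notification on the
 * per-netns notify list for delivery at commit time.
 */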
void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq, int event,
u16 flags, int family, int report, gfp_t gfp)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct sk_buff *skb;
int err;
char *buf = kasprintf(gfp, "%s:%u",
table->name, nft_net->base_seq);
audit_log_nfcfg(buf,
family,
obj->handle,
event == NFT_MSG_NEWOBJ ?
AUDIT_NFT_OP_OBJ_REGISTER :
AUDIT_NFT_OP_OBJ_UNREGISTER,
gfp);
kfree(buf);
if (!report &&
!nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
return;
skb = nlmsg_new(NLMSG_GOODSIZE, gfp);
if (skb == NULL)
goto err;
err = nf_tables_fill_obj_info(skb, net, portid, seq, event,
flags & (NLM_F_CREATE | NLM_F_EXCL),
family, table, obj, false);
if (err < 0) {
kfree_skb(skb);
goto err;
}
nft_notify_enqueue(skb, report, &nft_net->notify_list);
return;
err:
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
EXPORT_SYMBOL_GPL(nft_obj_notify);
static void nf_tables_obj_notify(const struct nft_ctx *ctx,
struct nft_object *obj, int event)
{
nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
ctx->flags, ctx->family, ctx->report, GFP_KERNEL);
}
/*
* Flow tables
*/
void nft_register_flowtable_type(struct nf_flowtable_type *type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
list_add_tail_rcu(&type->list, &nf_tables_flowtables);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_register_flowtable_type);
void nft_unregister_flowtable_type(struct nf_flowtable_type *type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
list_del_rcu(&type->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_flowtable_type);
static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
[NFTA_FLOWTABLE_TABLE] = { .type = NLA_STRING,
.len = NFT_NAME_MAXLEN - 1 },
[NFTA_FLOWTABLE_NAME] = { .type = NLA_STRING,
.len = NFT_NAME_MAXLEN - 1 },
[NFTA_FLOWTABLE_HOOK] = { .type = NLA_NESTED },
[NFTA_FLOWTABLE_HANDLE] = { .type = NLA_U64 },
[NFTA_FLOWTABLE_FLAGS] = { .type = NLA_U32 },
};
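/* Look up a flowtable by name in @table, restricted to the generations
 * selected by @genmask. Returns ERR_PTR(-ENOENT) if no flowtable matches.
 */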
struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
struct nft_flowtable *flowtable;
list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
if (!nla_strcmp(nla, flowtable->name) &&
nft_active_genmask(flowtable, genmask))
return flowtable;
}
return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(nft_flowtable_lookup);
void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
struct nft_flowtable *flowtable,
enum nft_trans_phase phase)
{
switch (phase) {
case NFT_TRANS_PREPARE_ERROR:
case NFT_TRANS_PREPARE:
case NFT_TRANS_ABORT:
case NFT_TRANS_RELEASE:
nft_use_dec(&flowtable->use);
fallthrough;
default:
return;
}
}
EXPORT_SYMBOL_GPL(nf_tables_deactivate_flowtable);
static struct nft_flowtable *
nft_flowtable_lookup_byhandle(const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
struct nft_flowtable *flowtable;
list_for_each_entry(flowtable, &table->flowtables, list) {
if (be64_to_cpu(nla_get_be64(nla)) == flowtable->handle &&
nft_active_genmask(flowtable, genmask))
return flowtable;
}
return ERR_PTR(-ENOENT);
}
struct nft_flowtable_hook {
u32 num;
int priority;
struct list_head list;
};
static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX + 1] = {
[NFTA_FLOWTABLE_HOOK_NUM] = { .type = NLA_U32 },
[NFTA_FLOWTABLE_HOOK_PRIORITY] = { .type = NLA_U32 },
[NFTA_FLOWTABLE_HOOK_DEVS] = { .type = NLA_NESTED },
};
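/* Parse the NFTA_FLOWTABLE_HOOK nest. On flowtable creation the hook number
 * and priority are mandatory and only NF_NETDEV_INGRESS is accepted; on
 * updates they may be omitted but must match the existing configuration.
 * Each device listed in NFTA_FLOWTABLE_HOOK_DEVS is turned into an nft_hook
 * entry with its hook ops pre-populated for registration.
 */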
static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
const struct nlattr * const nla[],
struct nft_flowtable_hook *flowtable_hook,
struct nft_flowtable *flowtable,
struct netlink_ext_ack *extack, bool add)
{
struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
struct nft_hook *hook;
int hooknum, priority;
int err;
INIT_LIST_HEAD(&flowtable_hook->list);
err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX,
nla[NFTA_FLOWTABLE_HOOK],
nft_flowtable_hook_policy, NULL);
if (err < 0)
return err;
if (add) {
if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
!tb[NFTA_FLOWTABLE_HOOK_PRIORITY]) {
NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
return -ENOENT;
}
hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
if (hooknum != NF_NETDEV_INGRESS)
return -EOPNOTSUPP;
priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
flowtable_hook->priority = priority;
flowtable_hook->num = hooknum;
} else {
if (tb[NFTA_FLOWTABLE_HOOK_NUM]) {
hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
if (hooknum != flowtable->hooknum)
return -EOPNOTSUPP;
}
if (tb[NFTA_FLOWTABLE_HOOK_PRIORITY]) {
priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
if (priority != flowtable->data.priority)
return -EOPNOTSUPP;
}
flowtable_hook->priority = flowtable->data.priority;
flowtable_hook->num = flowtable->hooknum;
}
if (tb[NFTA_FLOWTABLE_HOOK_DEVS]) {
err = nf_tables_parse_netdev_hooks(ctx->net,
tb[NFTA_FLOWTABLE_HOOK_DEVS],
&flowtable_hook->list,
extack);
if (err < 0)
return err;
}
list_for_each_entry(hook, &flowtable_hook->list, list) {
hook->ops.pf = NFPROTO_NETDEV;
hook->ops.hooknum = flowtable_hook->num;
hook->ops.priority = flowtable_hook->priority;
hook->ops.priv = &flowtable->data;
hook->ops.hook = flowtable->data.type->hook;
}
return err;
}
static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
{
const struct nf_flowtable_type *type;
list_for_each_entry(type, &nf_tables_flowtables, list) {
if (family == type->family)
return type;
}
return NULL;
}
static const struct nf_flowtable_type *
nft_flowtable_type_get(struct net *net, u8 family)
{
const struct nf_flowtable_type *type;
type = __nft_flowtable_type_get(family);
if (type != NULL && try_module_get(type->owner))
return type;
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
return ERR_PTR(-ENOENT);
}
/* Only called from error and netdev event paths. */
static void nft_unregister_flowtable_hook(struct net *net,
struct nft_flowtable *flowtable,
struct nft_hook *hook)
{
nf_unregister_net_hook(net, &hook->ops);
flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
FLOW_BLOCK_UNBIND);
}
static void __nft_unregister_flowtable_net_hooks(struct net *net,
struct list_head *hook_list,
bool release_netdev)
{
struct nft_hook *hook, *next;
list_for_each_entry_safe(hook, next, hook_list, list) {
nf_unregister_net_hook(net, &hook->ops);
if (release_netdev) {
list_del(&hook->list);
kfree_rcu(hook, rcu);
}
}
}
static void nft_unregister_flowtable_net_hooks(struct net *net,
struct list_head *hook_list)
{
__nft_unregister_flowtable_net_hooks(net, hook_list, false);
}
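/* Bind the flow block and register a netfilter hook for every device in
 * @hook_list, rejecting devices that are already used by another active
 * flowtable in the same table. On error, all hooks registered so far are
 * torn down again.
 */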
static int nft_register_flowtable_net_hooks(struct net *net,
struct nft_table *table,
struct list_head *hook_list,
struct nft_flowtable *flowtable)
{
struct nft_hook *hook, *hook2, *next;
struct nft_flowtable *ft;
int err, i = 0;
list_for_each_entry(hook, hook_list, list) {
list_for_each_entry(ft, &table->flowtables, list) {
if (!nft_is_active_next(net, ft))
continue;
list_for_each_entry(hook2, &ft->hook_list, list) {
if (hook->ops.dev == hook2->ops.dev &&
hook->ops.pf == hook2->ops.pf) {
err = -EEXIST;
goto err_unregister_net_hooks;
}
}
}
err = flowtable->data.type->setup(&flowtable->data,
hook->ops.dev,
FLOW_BLOCK_BIND);
if (err < 0)
goto err_unregister_net_hooks;
err = nf_register_net_hook(net, &hook->ops);
if (err < 0) {
flowtable->data.type->setup(&flowtable->data,
hook->ops.dev,
FLOW_BLOCK_UNBIND);
goto err_unregister_net_hooks;
}
i++;
}
return 0;
err_unregister_net_hooks:
list_for_each_entry_safe(hook, next, hook_list, list) {
if (i-- <= 0)
break;
nft_unregister_flowtable_hook(net, flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
return err;
}
static void nft_hooks_destroy(struct list_head *hook_list)
{
struct nft_hook *hook, *next;
list_for_each_entry_safe(hook, next, hook_list, list) {
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
}
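/* Handle NFT_MSG_NEWFLOWTABLE for an existing flowtable: devices that are
 * already attached are dropped from the request, unknown flags and toggling
 * of hardware offload are rejected, and the remaining new hooks are
 * registered and attached through an update transaction.
 */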
static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
struct nft_flowtable *flowtable,
struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_flowtable_hook flowtable_hook;
struct nft_hook *hook, *next;
struct nft_trans *trans;
bool unregister = false;
u32 flags;
int err;
err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable,
extack, false);
if (err < 0)
return err;
list_for_each_entry_safe(hook, next, &flowtable_hook.list, list) {
if (nft_hook_list_find(&flowtable->hook_list, hook)) {
list_del(&hook->list);
kfree(hook);
}
}
if (nla[NFTA_FLOWTABLE_FLAGS]) {
flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
if (flags & ~NFT_FLOWTABLE_MASK) {
err = -EOPNOTSUPP;
goto err_flowtable_update_hook;
}
if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^
(flags & NFT_FLOWTABLE_HW_OFFLOAD)) {
err = -EOPNOTSUPP;
goto err_flowtable_update_hook;
}
} else {
flags = flowtable->data.flags;
}
err = nft_register_flowtable_net_hooks(ctx->net, ctx->table,
&flowtable_hook.list, flowtable);
if (err < 0)
goto err_flowtable_update_hook;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWFLOWTABLE,
sizeof(struct nft_trans_flowtable));
if (!trans) {
unregister = true;
err = -ENOMEM;
goto err_flowtable_update_hook;
}
nft_trans_flowtable_flags(trans) = flags;
nft_trans_flowtable(trans) = flowtable;
nft_trans_flowtable_update(trans) = true;
INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
list_splice(&flowtable_hook.list, &nft_trans_flowtable_hooks(trans));
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err_flowtable_update_hook:
list_for_each_entry_safe(hook, next, &flowtable_hook.list, list) {
if (unregister)
nft_unregister_flowtable_hook(ctx->net, flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
return err;
}
static int nf_tables_newflowtable(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
struct nft_flowtable_hook flowtable_hook;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
const struct nf_flowtable_type *type;
struct nft_flowtable *flowtable;
struct nft_hook *hook, *next;
struct net *net = info->net;
struct nft_table *table;
struct nft_ctx ctx;
int err;
if (!nla[NFTA_FLOWTABLE_TABLE] ||
!nla[NFTA_FLOWTABLE_NAME] ||
!nla[NFTA_FLOWTABLE_HOOK])
return -EINVAL;
table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
genmask, NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_TABLE]);
return PTR_ERR(table);
}
flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
genmask);
if (IS_ERR(flowtable)) {
err = PTR_ERR(flowtable);
if (err != -ENOENT) {
NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
return err;
}
} else {
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
return -EEXIST;
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
return nft_flowtable_update(&ctx, info->nlh, flowtable, extack);
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
if (!nft_use_inc(&table->use))
return -EMFILE;
flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL_ACCOUNT);
if (!flowtable) {
err = -ENOMEM;
goto flowtable_alloc;
}
flowtable->table = table;
flowtable->handle = nf_tables_alloc_handle(table);
INIT_LIST_HEAD(&flowtable->hook_list);
flowtable->name = nla_strdup(nla[NFTA_FLOWTABLE_NAME], GFP_KERNEL_ACCOUNT);
if (!flowtable->name) {
err = -ENOMEM;
goto err1;
}
type = nft_flowtable_type_get(net, family);
if (IS_ERR(type)) {
err = PTR_ERR(type);
goto err2;
}
if (nla[NFTA_FLOWTABLE_FLAGS]) {
flowtable->data.flags =
ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK) {
err = -EOPNOTSUPP;
goto err3;
}
}
write_pnet(&flowtable->data.net, net);
flowtable->data.type = type;
err = type->init(&flowtable->data);
if (err < 0)
goto err3;
err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable,
extack, true);
if (err < 0)
goto err4;
list_splice(&flowtable_hook.list, &flowtable->hook_list);
flowtable->data.priority = flowtable_hook.priority;
flowtable->hooknum = flowtable_hook.num;
err = nft_register_flowtable_net_hooks(ctx.net, table,
&flowtable->hook_list,
flowtable);
if (err < 0) {
nft_hooks_destroy(&flowtable->hook_list);
goto err4;
}
err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
if (err < 0)
goto err5;
list_add_tail_rcu(&flowtable->list, &table->flowtables);
return 0;
err5:
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
nft_unregister_flowtable_hook(net, flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
err4:
flowtable->data.type->free(&flowtable->data);
err3:
module_put(type->owner);
err2:
kfree(flowtable->name);
err1:
kfree(flowtable);
flowtable_alloc:
nft_use_dec_restore(&table->use);
return err;
}
static void nft_flowtable_hook_release(struct nft_flowtable_hook *flowtable_hook)
{
struct nft_hook *this, *next;
list_for_each_entry_safe(this, next, &flowtable_hook->list, list) {
list_del(&this->list);
kfree(this);
}
}
static int nft_delflowtable_hook(struct nft_ctx *ctx,
struct nft_flowtable *flowtable,
struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_flowtable_hook flowtable_hook;
LIST_HEAD(flowtable_del_list);
struct nft_hook *this, *hook;
struct nft_trans *trans;
int err;
err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable,
extack, false);
if (err < 0)
return err;
list_for_each_entry(this, &flowtable_hook.list, list) {
hook = nft_hook_list_find(&flowtable->hook_list, this);
if (!hook) {
err = -ENOENT;
goto err_flowtable_del_hook;
}
list_move(&hook->list, &flowtable_del_list);
}
trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE,
sizeof(struct nft_trans_flowtable));
if (!trans) {
err = -ENOMEM;
goto err_flowtable_del_hook;
}
nft_trans_flowtable(trans) = flowtable;
nft_trans_flowtable_update(trans) = true;
INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
list_splice(&flowtable_del_list, &nft_trans_flowtable_hooks(trans));
nft_flowtable_hook_release(&flowtable_hook);
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err_flowtable_del_hook:
list_splice(&flowtable_del_list, &flowtable->hook_list);
nft_flowtable_hook_release(&flowtable_hook);
return err;
}
static int nf_tables_delflowtable(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct nft_flowtable *flowtable;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
struct nft_ctx ctx;
if (!nla[NFTA_FLOWTABLE_TABLE] ||
(!nla[NFTA_FLOWTABLE_NAME] &&
!nla[NFTA_FLOWTABLE_HANDLE]))
return -EINVAL;
table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
genmask, NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_TABLE]);
return PTR_ERR(table);
}
if (nla[NFTA_FLOWTABLE_HANDLE]) {
attr = nla[NFTA_FLOWTABLE_HANDLE];
flowtable = nft_flowtable_lookup_byhandle(table, attr, genmask);
} else {
attr = nla[NFTA_FLOWTABLE_NAME];
flowtable = nft_flowtable_lookup(table, attr, genmask);
}
if (IS_ERR(flowtable)) {
if (PTR_ERR(flowtable) == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYFLOWTABLE)
return 0;
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(flowtable);
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
if (nla[NFTA_FLOWTABLE_HOOK])
return nft_delflowtable_hook(&ctx, flowtable, extack);
if (flowtable->use > 0) {
NL_SET_BAD_ATTR(extack, attr);
return -EBUSY;
}
return nft_delflowtable(&ctx, flowtable);
}
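/* Build a flowtable netlink message. Delete notifications without a hook
 * list carry only the table, name and handle attributes; otherwise the use
 * count, flags, hook configuration and device list are included as well.
 */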
static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, int event,
u32 flags, int family,
struct nft_flowtable *flowtable,
struct list_head *hook_list)
{
struct nlattr *nest, *nest_devs;
struct nft_hook *hook;
struct nlmsghdr *nlh;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
NFNETLINK_V0, nft_base_seq(net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) ||
nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
NFTA_FLOWTABLE_PAD))
goto nla_put_failure;
if (event == NFT_MSG_DELFLOWTABLE && !hook_list) {
nlmsg_end(skb, nlh);
return 0;
}
if (nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK);
if (!nest)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) ||
nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->data.priority)))
goto nla_put_failure;
nest_devs = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK_DEVS);
if (!nest_devs)
goto nla_put_failure;
if (!hook_list)
hook_list = &flowtable->hook_list;
list_for_each_entry_rcu(hook, hook_list, list) {
if (nla_put_string(skb, NFTA_DEVICE_NAME, hook->ops.dev->name))
goto nla_put_failure;
}
nla_nest_end(skb, nest_devs);
nla_nest_end(skb, nest);
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
return -1;
}
struct nft_flowtable_filter {
char *table;
};
static int nf_tables_dump_flowtable(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct nft_flowtable_filter *filter = cb->data;
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nft_flowtable *flowtable;
struct nftables_pernet *nft_net;
const struct nft_table *table;
rcu_read_lock();
nft_net = nft_pernet(net);
cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
if (!nft_is_active(net, flowtable))
goto cont;
if (idx < s_idx)
goto cont;
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
if (filter && filter->table &&
strcmp(filter->table, table->name))
goto cont;
if (nf_tables_fill_flowtable_info(skb, net, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWFLOWTABLE,
NLM_F_MULTI | NLM_F_APPEND,
table->family,
flowtable, NULL) < 0)
goto done;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
done:
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
static int nf_tables_dump_flowtable_start(struct netlink_callback *cb)
{
const struct nlattr * const *nla = cb->data;
struct nft_flowtable_filter *filter = NULL;
if (nla[NFTA_FLOWTABLE_TABLE]) {
filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
if (!filter)
return -ENOMEM;
filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
GFP_ATOMIC);
if (!filter->table) {
kfree(filter);
return -ENOMEM;
}
}
cb->data = filter;
return 0;
}
static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
{
struct nft_flowtable_filter *filter = cb->data;
if (!filter)
return 0;
kfree(filter->table);
kfree(filter);
return 0;
}
/* called with rcu_read_lock held */
static int nf_tables_getflowtable(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
struct nft_flowtable *flowtable;
const struct nft_table *table;
struct net *net = info->net;
struct sk_buff *skb2;
int err;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nf_tables_dump_flowtable_start,
.dump = nf_tables_dump_flowtable,
.done = nf_tables_dump_flowtable_done,
.module = THIS_MODULE,
.data = (void *)nla,
};
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
if (!nla[NFTA_FLOWTABLE_NAME])
return -EINVAL;
table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
genmask, 0);
if (IS_ERR(table))
return PTR_ERR(table);
flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
genmask);
if (IS_ERR(flowtable))
return PTR_ERR(flowtable);
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
return -ENOMEM;
err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq,
NFT_MSG_NEWFLOWTABLE, 0, family,
flowtable, NULL);
if (err < 0)
goto err_fill_flowtable_info;
return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
err_fill_flowtable_info:
kfree_skb(skb2);
return err;
}
static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
struct nft_flowtable *flowtable,
struct list_head *hook_list, int event)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
struct sk_buff *skb;
u16 flags = 0;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
ctx->seq, event, flags,
ctx->family, flowtable, hook_list);
if (err < 0) {
kfree_skb(skb);
goto err;
}
nft_notify_enqueue(skb, ctx->report, &nft_net->notify_list);
return;
err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
{
struct nft_hook *hook, *next;
flowtable->data.type->free(&flowtable->data);
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
FLOW_BLOCK_UNBIND);
list_del_rcu(&hook->list);
kfree(hook);
}
kfree(flowtable->name);
module_put(flowtable->data.type->owner);
kfree(flowtable);
}
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nlmsghdr *nlh;
char buf[TASK_COMM_LEN];
int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN);
nlh = nfnl_msg_put(skb, portid, seq, event, 0, AF_UNSPEC,
NFNETLINK_V0, nft_base_seq(net));
if (!nlh)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_net->base_seq)) ||
nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) ||
nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current)))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(skb, nlh);
return -EMSGSIZE;
}
static void nft_flowtable_event(unsigned long event, struct net_device *dev,
struct nft_flowtable *flowtable)
{
struct nft_hook *hook;
list_for_each_entry(hook, &flowtable->hook_list, list) {
if (hook->ops.dev != dev)
continue;
/* flow_offload_netdev_event() cleans up entries for us. */
nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
break;
}
}
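/* Netdevice notifier: on NETDEV_UNREGISTER, remove the hook bound to the
 * vanishing device from every flowtable in its network namespace.
 */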
static int nf_tables_flowtable_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nft_flowtable *flowtable;
struct nftables_pernet *nft_net;
struct nft_table *table;
struct net *net;
if (event != NETDEV_UNREGISTER)
return 0;
net = dev_net(dev);
nft_net = nft_pernet(net);
mutex_lock(&nft_net->commit_mutex);
list_for_each_entry(table, &nft_net->tables, list) {
list_for_each_entry(flowtable, &table->flowtables, list) {
nft_flowtable_event(event, dev, flowtable);
}
}
mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
static struct notifier_block nf_tables_flowtable_notifier = {
.notifier_call = nf_tables_flowtable_event,
};
static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
int event)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
struct sk_buff *skb2;
int err;
if (!nlmsg_report(nlh) &&
!nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
return;
skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb2 == NULL)
goto err;
err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq);
if (err < 0) {
kfree_skb(skb2);
goto err;
}
nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
nlmsg_report(nlh), GFP_KERNEL);
return;
err:
nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
-ENOBUFS);
}
static int nf_tables_getgen(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct sk_buff *skb2;
int err;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb2 == NULL)
return -ENOMEM;
err = nf_tables_fill_gen_info(skb2, info->net, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq);
if (err < 0)
goto err_fill_gen_info;
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
err_fill_gen_info:
kfree_skb(skb2);
return err;
}
static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
[NFT_MSG_NEWTABLE] = {
.call = nf_tables_newtable,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_TABLE_MAX,
.policy = nft_table_policy,
},
[NFT_MSG_GETTABLE] = {
.call = nf_tables_gettable,
.type = NFNL_CB_RCU,
.attr_count = NFTA_TABLE_MAX,
.policy = nft_table_policy,
},
[NFT_MSG_DELTABLE] = {
.call = nf_tables_deltable,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_TABLE_MAX,
.policy = nft_table_policy,
},
[NFT_MSG_DESTROYTABLE] = {
.call = nf_tables_deltable,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_TABLE_MAX,
.policy = nft_table_policy,
},
[NFT_MSG_NEWCHAIN] = {
.call = nf_tables_newchain,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_CHAIN_MAX,
.policy = nft_chain_policy,
},
[NFT_MSG_GETCHAIN] = {
.call = nf_tables_getchain,
.type = NFNL_CB_RCU,
.attr_count = NFTA_CHAIN_MAX,
.policy = nft_chain_policy,
},
[NFT_MSG_DELCHAIN] = {
.call = nf_tables_delchain,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_CHAIN_MAX,
.policy = nft_chain_policy,
},
[NFT_MSG_DESTROYCHAIN] = {
.call = nf_tables_delchain,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_CHAIN_MAX,
.policy = nft_chain_policy,
},
[NFT_MSG_NEWRULE] = {
.call = nf_tables_newrule,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_RULE_MAX,
.policy = nft_rule_policy,
},
[NFT_MSG_GETRULE] = {
.call = nf_tables_getrule,
.type = NFNL_CB_RCU,
.attr_count = NFTA_RULE_MAX,
.policy = nft_rule_policy,
},
[NFT_MSG_GETRULE_RESET] = {
.call = nf_tables_getrule,
.type = NFNL_CB_RCU,
.attr_count = NFTA_RULE_MAX,
.policy = nft_rule_policy,
},
[NFT_MSG_DELRULE] = {
.call = nf_tables_delrule,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_RULE_MAX,
.policy = nft_rule_policy,
},
[NFT_MSG_DESTROYRULE] = {
.call = nf_tables_delrule,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_RULE_MAX,
.policy = nft_rule_policy,
},
[NFT_MSG_NEWSET] = {
.call = nf_tables_newset,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_SET_MAX,
.policy = nft_set_policy,
},
[NFT_MSG_GETSET] = {
.call = nf_tables_getset,
.type = NFNL_CB_RCU,
.attr_count = NFTA_SET_MAX,
.policy = nft_set_policy,
},
[NFT_MSG_DELSET] = {
.call = nf_tables_delset,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_SET_MAX,
.policy = nft_set_policy,
},
[NFT_MSG_DESTROYSET] = {
.call = nf_tables_delset,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_SET_MAX,
.policy = nft_set_policy,
},
[NFT_MSG_NEWSETELEM] = {
.call = nf_tables_newsetelem,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_SET_ELEM_LIST_MAX,
.policy = nft_set_elem_list_policy,
},
[NFT_MSG_GETSETELEM] = {
.call = nf_tables_getsetelem,
.type = NFNL_CB_RCU,
.attr_count = NFTA_SET_ELEM_LIST_MAX,
.policy = nft_set_elem_list_policy,
},
[NFT_MSG_GETSETELEM_RESET] = {
.call = nf_tables_getsetelem,
.type = NFNL_CB_RCU,
.attr_count = NFTA_SET_ELEM_LIST_MAX,
.policy = nft_set_elem_list_policy,
},
[NFT_MSG_DELSETELEM] = {
.call = nf_tables_delsetelem,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_SET_ELEM_LIST_MAX,
.policy = nft_set_elem_list_policy,
},
[NFT_MSG_DESTROYSETELEM] = {
.call = nf_tables_delsetelem,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_SET_ELEM_LIST_MAX,
.policy = nft_set_elem_list_policy,
},
[NFT_MSG_GETGEN] = {
.call = nf_tables_getgen,
.type = NFNL_CB_RCU,
},
[NFT_MSG_NEWOBJ] = {
.call = nf_tables_newobj,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
},
[NFT_MSG_GETOBJ] = {
.call = nf_tables_getobj,
.type = NFNL_CB_RCU,
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
},
[NFT_MSG_DELOBJ] = {
.call = nf_tables_delobj,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
},
[NFT_MSG_DESTROYOBJ] = {
.call = nf_tables_delobj,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
},
[NFT_MSG_GETOBJ_RESET] = {
.call = nf_tables_getobj,
.type = NFNL_CB_RCU,
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
},
[NFT_MSG_NEWFLOWTABLE] = {
.call = nf_tables_newflowtable,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_FLOWTABLE_MAX,
.policy = nft_flowtable_policy,
},
[NFT_MSG_GETFLOWTABLE] = {
.call = nf_tables_getflowtable,
.type = NFNL_CB_RCU,
.attr_count = NFTA_FLOWTABLE_MAX,
.policy = nft_flowtable_policy,
},
[NFT_MSG_DELFLOWTABLE] = {
.call = nf_tables_delflowtable,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_FLOWTABLE_MAX,
.policy = nft_flowtable_policy,
},
[NFT_MSG_DESTROYFLOWTABLE] = {
.call = nf_tables_delflowtable,
.type = NFNL_CB_BATCH,
.attr_count = NFTA_FLOWTABLE_MAX,
.policy = nft_flowtable_policy,
},
};
static int nf_tables_validate(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_table *table;
list_for_each_entry(table, &nft_net->tables, list) {
switch (table->validate_state) {
case NFT_VALIDATE_SKIP:
continue;
case NFT_VALIDATE_NEED:
nft_validate_state_update(table, NFT_VALIDATE_DO);
fallthrough;
case NFT_VALIDATE_DO:
if (nft_table_validate(net, table) < 0)
return -EAGAIN;
nft_validate_state_update(table, NFT_VALIDATE_SKIP);
break;
}
}
return 0;
}
/* A drop policy has to be deferred until all rules have been activated;
 * otherwise a large ruleset that contains a drop-policy base chain would
 * drop all packets until the full transaction has been processed.
 *
 * We therefore only apply the drop policy once the transaction has been
 * finalized.
 */
static void nft_chain_commit_drop_policy(struct nft_trans *trans)
{
struct nft_base_chain *basechain;
if (nft_trans_chain_policy(trans) != NF_DROP)
return;
if (!nft_is_base_chain(trans->ctx.chain))
return;
basechain = nft_base_chain(trans->ctx.chain);
basechain->policy = NF_DROP;
}
static void nft_chain_commit_update(struct nft_trans *trans)
{
struct nft_base_chain *basechain;
if (nft_trans_chain_name(trans)) {
rhltable_remove(&trans->ctx.table->chains_ht,
&trans->ctx.chain->rhlhead,
nft_chain_ht_params);
swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
rhltable_insert_key(&trans->ctx.table->chains_ht,
trans->ctx.chain->name,
&trans->ctx.chain->rhlhead,
nft_chain_ht_params);
}
if (!nft_is_base_chain(trans->ctx.chain))
return;
nft_chain_stats_replace(trans);
basechain = nft_base_chain(trans->ctx.chain);
switch (nft_trans_chain_policy(trans)) {
case NF_DROP:
case NF_ACCEPT:
basechain->policy = nft_trans_chain_policy(trans);
break;
}
}
static void nft_obj_commit_update(struct nft_trans *trans)
{
struct nft_object *newobj;
struct nft_object *obj;
obj = nft_trans_obj(trans);
newobj = nft_trans_obj_newobj(trans);
if (obj->ops->update)
obj->ops->update(obj, newobj);
nft_obj_destroy(&trans->ctx, newobj);
}
static void nft_commit_release(struct nft_trans *trans)
{
switch (trans->msg_type) {
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
nf_tables_table_destroy(&trans->ctx);
break;
case NFT_MSG_NEWCHAIN:
free_percpu(nft_trans_chain_stats(trans));
kfree(nft_trans_chain_name(trans));
break;
case NFT_MSG_DELCHAIN:
case NFT_MSG_DESTROYCHAIN:
if (nft_trans_chain_update(trans))
nft_hooks_destroy(&nft_trans_chain_hooks(trans));
else
nf_tables_chain_destroy(&trans->ctx);
break;
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_DELSET:
case NFT_MSG_DESTROYSET:
nft_set_destroy(&trans->ctx, nft_trans_set(trans));
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
nf_tables_set_elem_destroy(&trans->ctx,
nft_trans_elem_set(trans),
nft_trans_elem(trans).priv);
break;
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
break;
case NFT_MSG_DELFLOWTABLE:
case NFT_MSG_DESTROYFLOWTABLE:
if (nft_trans_flowtable_update(trans))
nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
else
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
break;
}
if (trans->put_net)
put_net(trans->ctx.net);
kfree(trans);
}
static void nf_tables_trans_destroy_work(struct work_struct *w)
{
struct nft_trans *trans, *next;
LIST_HEAD(head);
spin_lock(&nf_tables_destroy_list_lock);
list_splice_init(&nf_tables_destroy_list, &head);
spin_unlock(&nf_tables_destroy_list_lock);
if (list_empty(&head))
return;
synchronize_rcu();
list_for_each_entry_safe(trans, next, &head, list) {
nft_trans_list_del(trans);
nft_commit_release(trans);
}
}
void nf_tables_trans_destroy_flush_work(void)
{
flush_work(&trans_destroy_work);
}
EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);
static bool nft_expr_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
return false;
}
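/* Build the next-generation rule blob for @chain: the expressions of every
 * rule that will be active in the next generation are copied back to back
 * into a single allocation, terminated by an end-of-blob marker.
 */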
static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain)
{
const struct nft_expr *expr, *last;
struct nft_regs_track track = {};
unsigned int size, data_size;
void *data, *data_boundary;
struct nft_rule_dp *prule;
struct nft_rule *rule;
/* already handled or inactive chain? */
if (chain->blob_next || !nft_is_active_next(net, chain))
return 0;
data_size = 0;
list_for_each_entry(rule, &chain->rules, list) {
if (nft_is_active_next(net, rule)) {
data_size += sizeof(*prule) + rule->dlen;
if (data_size > INT_MAX)
return -ENOMEM;
}
}
chain->blob_next = nf_tables_chain_alloc_rules(chain, data_size);
if (!chain->blob_next)
return -ENOMEM;
data = (void *)chain->blob_next->data;
data_boundary = data + data_size;
size = 0;
list_for_each_entry(rule, &chain->rules, list) {
if (!nft_is_active_next(net, rule))
continue;
prule = (struct nft_rule_dp *)data;
data += offsetof(struct nft_rule_dp, data);
if (WARN_ON_ONCE(data > data_boundary))
return -ENOMEM;
size = 0;
track.last = nft_expr_last(rule);
nft_rule_for_each_expr(expr, last, rule) {
track.cur = expr;
if (nft_expr_reduce(&track, expr)) {
expr = track.cur;
continue;
}
if (WARN_ON_ONCE(data + size + expr->ops->size > data_boundary))
return -ENOMEM;
memcpy(data + size, expr, expr->ops->size);
size += expr->ops->size;
}
if (WARN_ON_ONCE(size >= 1 << 12))
return -ENOMEM;
prule->handle = rule->handle;
prule->dlen = size;
prule->is_last = 0;
data += size;
size = 0;
chain->blob_next->size += (unsigned long)(data - (void *)prule);
}
if (WARN_ON_ONCE(data > data_boundary))
return -ENOMEM;
prule = (struct nft_rule_dp *)data;
nft_last_rule(chain, prule);
return 0;
}
static void nf_tables_commit_chain_prepare_cancel(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_trans *trans, *next;
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
struct nft_chain *chain = trans->ctx.chain;
if (trans->msg_type == NFT_MSG_NEWRULE ||
trans->msg_type == NFT_MSG_DELRULE) {
kvfree(chain->blob_next);
chain->blob_next = NULL;
}
}
}
static void __nf_tables_commit_chain_free_rules(struct rcu_head *h)
{
struct nft_rule_dp_last *l = container_of(h, struct nft_rule_dp_last, h);
kvfree(l->blob);
}
static void nf_tables_commit_chain_free_rules_old(struct nft_rule_blob *blob)
{
struct nft_rule_dp_last *last;
/* last rule trailer is after end marker */
last = (void *)blob + sizeof(*blob) + blob->size;
last->blob = blob;
call_rcu(&last->h, __nf_tables_commit_chain_free_rules);
}
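/* Make the chain's new rule blob visible to the packet path and release the
 * blob that goes out of use once all RCU readers are done with it.
 */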
static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
{
struct nft_rule_blob *g0, *g1;
bool next_genbit;
next_genbit = nft_gencursor_next(net);
g0 = rcu_dereference_protected(chain->blob_gen_0,
lockdep_commit_lock_is_held(net));
g1 = rcu_dereference_protected(chain->blob_gen_1,
lockdep_commit_lock_is_held(net));
/* No changes to this chain? */
if (chain->blob_next == NULL) {
/* chain had no change in last or next generation */
if (g0 == g1)
return;
/*
* chain had no change in this generation; make sure next
* one uses same rules as current generation.
*/
if (next_genbit) {
rcu_assign_pointer(chain->blob_gen_1, g0);
nf_tables_commit_chain_free_rules_old(g1);
} else {
rcu_assign_pointer(chain->blob_gen_0, g1);
nf_tables_commit_chain_free_rules_old(g0);
}
return;
}
if (next_genbit)
rcu_assign_pointer(chain->blob_gen_1, chain->blob_next);
else
rcu_assign_pointer(chain->blob_gen_0, chain->blob_next);
chain->blob_next = NULL;
if (g0 == g1)
return;
if (next_genbit)
nf_tables_commit_chain_free_rules_old(g1);
else
nf_tables_commit_chain_free_rules_old(g0);
}
static void nft_obj_del(struct nft_object *obj)
{
rhltable_remove(&nft_objname_ht, &obj->rhlhead, nft_objname_ht_params);
list_del_rcu(&obj->list);
}
void nft_chain_del(struct nft_chain *chain)
{
struct nft_table *table = chain->table;
WARN_ON_ONCE(rhltable_remove(&table->chains_ht, &chain->rhlhead,
nft_chain_ht_params));
list_del_rcu(&chain->list);
}
static void nft_trans_gc_setelem_remove(struct nft_ctx *ctx,
struct nft_trans_gc *trans)
{
void **priv = trans->priv;
unsigned int i;
for (i = 0; i < trans->count; i++) {
struct nft_set_elem elem = {
.priv = priv[i],
};
nft_setelem_data_deactivate(ctx->net, trans->set, &elem);
nft_setelem_remove(ctx->net, trans->set, &elem);
}
}
void nft_trans_gc_destroy(struct nft_trans_gc *trans)
{
nft_set_put(trans->set);
put_net(trans->net);
kfree(trans);
}
static void nft_trans_gc_trans_free(struct rcu_head *rcu)
{
struct nft_set_elem elem = {};
struct nft_trans_gc *trans;
struct nft_ctx ctx = {};
unsigned int i;
trans = container_of(rcu, struct nft_trans_gc, rcu);
ctx.net = read_pnet(&trans->set->net);
for (i = 0; i < trans->count; i++) {
elem.priv = trans->priv[i];
if (!nft_setelem_is_catchall(trans->set, &elem))
atomic_dec(&trans->set->nelems);
nf_tables_set_elem_destroy(&ctx, trans->set, elem.priv);
}
nft_trans_gc_destroy(trans);
}
static bool nft_trans_gc_work_done(struct nft_trans_gc *trans)
{
struct nftables_pernet *nft_net;
struct nft_ctx ctx = {};
nft_net = nft_pernet(trans->net);
mutex_lock(&nft_net->commit_mutex);
/* Check for a race with a transaction, otherwise this batch refers to
 * stale objects that might not be there anymore. Skip the batch if the
 * set has been destroyed by a control plane transaction, i.e. the gc
 * worker lost the race.
 */
if (READ_ONCE(nft_net->gc_seq) != trans->seq || trans->set->dead) {
mutex_unlock(&nft_net->commit_mutex);
return false;
}
ctx.net = trans->net;
ctx.table = trans->set->table;
nft_trans_gc_setelem_remove(&ctx, trans);
mutex_unlock(&nft_net->commit_mutex);
return true;
}
static void nft_trans_gc_work(struct work_struct *work)
{
struct nft_trans_gc *trans, *next;
LIST_HEAD(trans_gc_list);
spin_lock(&nf_tables_gc_list_lock);
list_splice_init(&nf_tables_gc_list, &trans_gc_list);
spin_unlock(&nf_tables_gc_list_lock);
list_for_each_entry_safe(trans, next, &trans_gc_list, list) {
list_del(&trans->list);
if (!nft_trans_gc_work_done(trans)) {
nft_trans_gc_destroy(trans);
continue;
}
call_rcu(&trans->rcu, nft_trans_gc_trans_free);
}
}
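/* Allocate a GC batch for @set. The batch pins the set and its network
 * namespace so that collected elements can be disposed of safely later on.
 */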
struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
unsigned int gc_seq, gfp_t gfp)
{
struct net *net = read_pnet(&set->net);
struct nft_trans_gc *trans;
trans = kzalloc(sizeof(*trans), gfp);
if (!trans)
return NULL;
trans->net = maybe_get_net(net);
if (!trans->net) {
kfree(trans);
return NULL;
}
refcount_inc(&set->refs);
trans->set = set;
trans->seq = gc_seq;
return trans;
}
void nft_trans_gc_elem_add(struct nft_trans_gc *trans, void *priv)
{
trans->priv[trans->count++] = priv;
}
static void nft_trans_gc_queue_work(struct nft_trans_gc *trans)
{
spin_lock(&nf_tables_gc_list_lock);
list_add_tail(&trans->list, &nf_tables_gc_list);
spin_unlock(&nf_tables_gc_list_lock);
schedule_work(&trans_gc_work);
}
static int nft_trans_gc_space(struct nft_trans_gc *trans)
{
return NFT_TRANS_GC_BATCHCOUNT - trans->count;
}
struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
unsigned int gc_seq, gfp_t gfp)
{
struct nft_set *set;
if (nft_trans_gc_space(gc))
return gc;
set = gc->set;
nft_trans_gc_queue_work(gc);
return nft_trans_gc_alloc(set, gc_seq, gfp);
}
void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans)
{
if (trans->count == 0) {
nft_trans_gc_destroy(trans);
return;
}
nft_trans_gc_queue_work(trans);
}
struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp)
{
struct nft_set *set;
if (WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net)))
return NULL;
if (nft_trans_gc_space(gc))
return gc;
set = gc->set;
call_rcu(&gc->rcu, nft_trans_gc_trans_free);
return nft_trans_gc_alloc(set, 0, gfp);
}
void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
{
WARN_ON_ONCE(!lockdep_commit_lock_is_held(trans->net));
if (trans->count == 0) {
nft_trans_gc_destroy(trans);
return;
}
call_rcu(&trans->rcu, nft_trans_gc_trans_free);
}
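/* Collect expired catchall elements of the set into GC batches, allocating
 * follow-up batches through the sync or async queueing helpers whenever the
 * current batch is full.
 */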
static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
unsigned int gc_seq,
bool sync)
{
struct nft_set_elem_catchall *catchall;
const struct nft_set *set = gc->set;
struct nft_set_ext *ext;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_expired(ext))
continue;
if (nft_set_elem_is_dead(ext))
goto dead_elem;
nft_set_elem_dead(ext);
dead_elem:
if (sync)
gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
else
gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
if (!gc)
return NULL;
nft_trans_gc_elem_add(gc, catchall->elem);
}
return gc;
}
struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
unsigned int gc_seq)
{
return nft_trans_gc_catchall(gc, gc_seq, false);
}
struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
{
return nft_trans_gc_catchall(gc, 0, true);
}
static void nf_tables_module_autoload_cleanup(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_module_request *req, *next;
WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
list_for_each_entry_safe(req, next, &nft_net->module_list, list) {
WARN_ON_ONCE(!req->done);
list_del(&req->list);
kfree(req);
}
}
static void nf_tables_commit_release(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_trans *trans;
/* All side effects have to be made visible.
 * For example, if a chain named 'foo' has been deleted, a
 * new transaction must not find it anymore.
 *
 * Memory reclaim happens asynchronously from a work queue
 * to avoid an expensive synchronize_rcu() in the commit phase.
 */
if (list_empty(&nft_net->commit_list)) {
nf_tables_module_autoload_cleanup(net);
mutex_unlock(&nft_net->commit_mutex);
return;
}
trans = list_last_entry(&nft_net->commit_list,
struct nft_trans, list);
get_net(trans->ctx.net);
WARN_ON_ONCE(trans->put_net);
trans->put_net = true;
spin_lock(&nf_tables_destroy_list_lock);
list_splice_tail_init(&nft_net->commit_list, &nf_tables_destroy_list);
spin_unlock(&nf_tables_destroy_list_lock);
nf_tables_module_autoload_cleanup(net);
schedule_work(&trans_destroy_work);
mutex_unlock(&nft_net->commit_mutex);
}
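/* Flush the per-netns notification queue: consecutive messages with the same
 * report setting are coalesced into batches of up to NLMSG_GOODSIZE bytes
 * before being multicast to the NFNLGRP_NFTABLES group.
 */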
static void nft_commit_notify(struct net *net, u32 portid)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct sk_buff *batch_skb = NULL, *nskb, *skb;
unsigned char *data;
int len;
list_for_each_entry_safe(skb, nskb, &nft_net->notify_list, list) {
if (!batch_skb) {
new_batch:
batch_skb = skb;
len = NLMSG_GOODSIZE - skb->len;
list_del(&skb->list);
continue;
}
len -= skb->len;
if (len > 0 && NFT_CB(skb).report == NFT_CB(batch_skb).report) {
data = skb_put(batch_skb, skb->len);
memcpy(data, skb->data, skb->len);
list_del(&skb->list);
kfree_skb(skb);
continue;
}
nfnetlink_send(batch_skb, net, portid, NFNLGRP_NFTABLES,
NFT_CB(batch_skb).report, GFP_KERNEL);
goto new_batch;
}
if (batch_skb) {
nfnetlink_send(batch_skb, net, portid, NFNLGRP_NFTABLES,
NFT_CB(batch_skb).report, GFP_KERNEL);
}
WARN_ON_ONCE(!list_empty(&nft_net->notify_list));
}
static int nf_tables_commit_audit_alloc(struct list_head *adl,
struct nft_table *table)
{
struct nft_audit_data *adp;
list_for_each_entry(adp, adl, list) {
if (adp->table == table)
return 0;
}
adp = kzalloc(sizeof(*adp), GFP_KERNEL);
if (!adp)
return -ENOMEM;
adp->table = table;
list_add(&adp->list, adl);
return 0;
}
static void nf_tables_commit_audit_free(struct list_head *adl)
{
struct nft_audit_data *adp, *adn;
list_for_each_entry_safe(adp, adn, adl, list) {
list_del(&adp->list);
kfree(adp);
}
}
static void nf_tables_commit_audit_collect(struct list_head *adl,
struct nft_table *table, u32 op)
{
struct nft_audit_data *adp;
list_for_each_entry(adp, adl, list) {
if (adp->table == table)
goto found;
}
WARN_ONCE(1, "table=%s not expected in commit list", table->name);
return;
found:
adp->entries++;
if (!adp->op || adp->op > op)
adp->op = op;
}
#define AUNFTABLENAMELEN (NFT_TABLE_MAXNAMELEN + 22)
static void nf_tables_commit_audit_log(struct list_head *adl, u32 generation)
{
struct nft_audit_data *adp, *adn;
char aubuf[AUNFTABLENAMELEN];
list_for_each_entry_safe(adp, adn, adl, list) {
snprintf(aubuf, AUNFTABLENAMELEN, "%s:%u", adp->table->name,
generation);
audit_log_nfcfg(aubuf, adp->table->family, adp->entries,
nft2audit_op[adp->op], GFP_KERNEL);
list_del(&adp->list);
kfree(adp);
}
}
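/* Invoke the ->commit callback of every set that queued pending updates
 * during this transaction.
 */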
static void nft_set_commit_update(struct list_head *set_update_list)
{
struct nft_set *set, *next;
list_for_each_entry_safe(set, next, set_update_list, pending_update) {
list_del_init(&set->pending_update);
if (!set->ops->commit)
continue;
set->ops->commit(set);
}
}
static unsigned int nft_gc_seq_begin(struct nftables_pernet *nft_net)
{
unsigned int gc_seq;
/* Bump the GC sequence counter: an odd value marks the collector as busy. */
gc_seq = READ_ONCE(nft_net->gc_seq);
WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
return gc_seq;
}
static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq)
{
WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
}
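/* Commit the pending transaction: validate the ruleset, prepare and publish
 * the next-generation rule blobs, bump the generation counter and then walk
 * the transaction list to activate new objects, send notifications and hand
 * removed objects over to the asynchronous destroy worker.
 */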
static int nf_tables_commit(struct net *net, struct sk_buff *skb)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_trans *trans, *next;
unsigned int base_seq, gc_seq;
LIST_HEAD(set_update_list);
struct nft_trans_elem *te;
struct nft_chain *chain;
struct nft_table *table;
LIST_HEAD(adl);
int err;
if (list_empty(&nft_net->commit_list)) {
mutex_unlock(&nft_net->commit_mutex);
return 0;
}
list_for_each_entry(trans, &nft_net->binding_list, binding_list) {
switch (trans->msg_type) {
case NFT_MSG_NEWSET:
if (!nft_trans_set_update(trans) &&
nft_set_is_anonymous(nft_trans_set(trans)) &&
!nft_trans_set_bound(trans)) {
pr_warn_once("nftables ruleset with unbound set\n");
return -EINVAL;
}
break;
case NFT_MSG_NEWCHAIN:
if (!nft_trans_chain_update(trans) &&
nft_chain_binding(nft_trans_chain(trans)) &&
!nft_trans_chain_bound(trans)) {
pr_warn_once("nftables ruleset with unbound chain\n");
return -EINVAL;
}
break;
}
}
/* step 0. Validate ruleset, otherwise roll back for error reporting. */
if (nf_tables_validate(net) < 0) {
nft_net->validate_state = NFT_VALIDATE_DO;
return -EAGAIN;
}
err = nft_flow_rule_offload_commit(net);
if (err < 0)
return err;
/* step 1. Allocate space for next generation rules_gen_X[] */
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
int ret;
ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table);
if (ret) {
nf_tables_commit_chain_prepare_cancel(net);
nf_tables_commit_audit_free(&adl);
return ret;
}
if (trans->msg_type == NFT_MSG_NEWRULE ||
trans->msg_type == NFT_MSG_DELRULE) {
chain = trans->ctx.chain;
ret = nf_tables_commit_chain_prepare(net, chain);
if (ret < 0) {
nf_tables_commit_chain_prepare_cancel(net);
nf_tables_commit_audit_free(&adl);
return ret;
}
}
}
/* step 2. Make rules_gen_X visible to packet path */
list_for_each_entry(table, &nft_net->tables, list) {
list_for_each_entry(chain, &table->chains, list)
nf_tables_commit_chain(net, chain);
}
/*
* Bump generation counter, invalidate any dump in progress.
* Cannot fail after this point.
*/
base_seq = READ_ONCE(nft_net->base_seq);
while (++base_seq == 0)
;
WRITE_ONCE(nft_net->base_seq, base_seq);
gc_seq = nft_gc_seq_begin(nft_net);
/* step 3. Start new generation, rules_gen_X now in use. */
net->nft.gencursor = nft_gencursor_next(net);
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
nf_tables_commit_audit_collect(&adl, trans->ctx.table,
trans->msg_type);
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
nft_trans_destroy(trans);
break;
}
if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
nf_tables_table_disable(net, trans->ctx.table);
trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
} else {
nft_clear(net, trans->ctx.table);
}
nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
list_del_rcu(&trans->ctx.table->list);
nf_tables_table_notify(&trans->ctx, trans->msg_type);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
nft_chain_commit_update(trans);
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN,
&nft_trans_chain_hooks(trans));
list_splice(&nft_trans_chain_hooks(trans),
&nft_trans_basechain(trans)->hook_list);
/* trans destroyed after rcu grace period */
} else {
nft_chain_commit_drop_policy(trans);
nft_clear(net, trans->ctx.chain);
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN, NULL);
nft_trans_destroy(trans);
}
break;
case NFT_MSG_DELCHAIN:
case NFT_MSG_DESTROYCHAIN:
if (nft_trans_chain_update(trans)) {
nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
&nft_trans_chain_hooks(trans));
nft_netdev_unregister_hooks(net,
&nft_trans_chain_hooks(trans),
true);
} else {
nft_chain_del(trans->ctx.chain);
nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
NULL);
nf_tables_unregister_hook(trans->ctx.net,
trans->ctx.table,
trans->ctx.chain);
}
break;
case NFT_MSG_NEWRULE:
nft_clear(trans->ctx.net, nft_trans_rule(trans));
nf_tables_rule_notify(&trans->ctx,
nft_trans_rule(trans),
NFT_MSG_NEWRULE);
if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
list_del_rcu(&nft_trans_rule(trans)->list);
nf_tables_rule_notify(&trans->ctx,
nft_trans_rule(trans),
trans->msg_type);
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_COMMIT);
if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_NEWSET:
if (nft_trans_set_update(trans)) {
struct nft_set *set = nft_trans_set(trans);
WRITE_ONCE(set->timeout, nft_trans_set_timeout(trans));
WRITE_ONCE(set->gc_int, nft_trans_set_gc_int(trans));
if (nft_trans_set_size(trans))
WRITE_ONCE(set->size, nft_trans_set_size(trans));
} else {
nft_clear(net, nft_trans_set(trans));
/* This avoids hitting -EBUSY when deleting the table
* from the transaction.
*/
if (nft_set_is_anonymous(nft_trans_set(trans)) &&
!list_empty(&nft_trans_set(trans)->bindings))
nft_use_dec(&trans->ctx.table->use);
}
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
NFT_MSG_NEWSET, GFP_KERNEL);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSET:
case NFT_MSG_DESTROYSET:
nft_trans_set(trans)->dead = 1;
list_del_rcu(&nft_trans_set(trans)->list);
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
trans->msg_type, GFP_KERNEL);
break;
case NFT_MSG_NEWSETELEM:
te = (struct nft_trans_elem *)trans->data;
nft_setelem_activate(net, te->set, &te->elem);
nf_tables_setelem_notify(&trans->ctx, te->set,
&te->elem,
NFT_MSG_NEWSETELEM);
if (te->set->ops->commit &&
list_empty(&te->set->pending_update)) {
list_add_tail(&te->set->pending_update,
&set_update_list);
}
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
te = (struct nft_trans_elem *)trans->data;
nf_tables_setelem_notify(&trans->ctx, te->set,
&te->elem,
trans->msg_type);
nft_setelem_remove(net, te->set, &te->elem);
if (!nft_setelem_is_catchall(te->set, &te->elem)) {
atomic_dec(&te->set->nelems);
te->set->ndeact--;
}
if (te->set->ops->commit &&
list_empty(&te->set->pending_update)) {
list_add_tail(&te->set->pending_update,
&set_update_list);
}
break;
case NFT_MSG_NEWOBJ:
if (nft_trans_obj_update(trans)) {
nft_obj_commit_update(trans);
nf_tables_obj_notify(&trans->ctx,
nft_trans_obj(trans),
NFT_MSG_NEWOBJ);
} else {
nft_clear(net, nft_trans_obj(trans));
nf_tables_obj_notify(&trans->ctx,
nft_trans_obj(trans),
NFT_MSG_NEWOBJ);
nft_trans_destroy(trans);
}
break;
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
nft_obj_del(nft_trans_obj(trans));
nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
trans->msg_type);
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
nft_trans_flowtable(trans)->data.flags =
nft_trans_flowtable_flags(trans);
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
NFT_MSG_NEWFLOWTABLE);
list_splice(&nft_trans_flowtable_hooks(trans),
&nft_trans_flowtable(trans)->hook_list);
} else {
nft_clear(net, nft_trans_flowtable(trans));
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
NULL,
NFT_MSG_NEWFLOWTABLE);
}
nft_trans_destroy(trans);
break;
case NFT_MSG_DELFLOWTABLE:
case NFT_MSG_DESTROYFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
trans->msg_type);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable_hooks(trans));
} else {
list_del_rcu(&nft_trans_flowtable(trans)->list);
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
NULL,
trans->msg_type);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable(trans)->hook_list);
}
break;
}
}
nft_set_commit_update(&set_update_list);
nft_commit_notify(net, NETLINK_CB(skb).portid);
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
nf_tables_commit_audit_log(&adl, nft_net->base_seq);
nft_gc_seq_end(nft_net, gc_seq);
nft_net->validate_state = NFT_VALIDATE_SKIP;
nf_tables_commit_release(net);
return 0;
}
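/* Process queued module autoload requests; the commit mutex is released
 * around the request_module() calls and re-acquired afterwards.
 */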
static void nf_tables_module_autoload(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_module_request *req, *next;
LIST_HEAD(module_list);
list_splice_init(&nft_net->module_list, &module_list);
mutex_unlock(&nft_net->commit_mutex);
list_for_each_entry_safe(req, next, &module_list, list) {
request_module("%s", req->module);
req->done = true;
}
mutex_lock(&nft_net->commit_mutex);
list_splice(&module_list, &nft_net->module_list);
}
static void nf_tables_abort_release(struct nft_trans *trans)
{
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
nf_tables_table_destroy(&trans->ctx);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans))
nft_hooks_destroy(&nft_trans_chain_hooks(trans));
else
nf_tables_chain_destroy(&trans->ctx);
break;
case NFT_MSG_NEWRULE:
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_NEWSET:
nft_set_destroy(&trans->ctx, nft_trans_set(trans));
break;
case NFT_MSG_NEWSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),
nft_trans_elem(trans).priv, true);
break;
case NFT_MSG_NEWOBJ:
nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans))
nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
else
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
break;
}
kfree(trans);
}
static void nft_set_abort_update(struct list_head *set_update_list)
{
struct nft_set *set, *next;
list_for_each_entry_safe(set, next, set_update_list, pending_update) {
list_del_init(&set->pending_update);
if (!set->ops->abort)
continue;
set->ops->abort(set);
}
}
static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_trans *trans, *next;
LIST_HEAD(set_update_list);
struct nft_trans_elem *te;
if (action == NFNL_ABORT_VALIDATE &&
nf_tables_validate(net) < 0)
return -EAGAIN;
list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
list) {
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
nft_trans_destroy(trans);
break;
}
if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
nf_tables_table_disable(net, trans->ctx.table);
trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
} else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
}
trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
list_del_rcu(&trans->ctx.table->list);
}
break;
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
nft_clear(trans->ctx.net, trans->ctx.table);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
nft_netdev_unregister_hooks(net,
&nft_trans_chain_hooks(trans),
true);
free_percpu(nft_trans_chain_stats(trans));
kfree(nft_trans_chain_name(trans));
nft_trans_destroy(trans);
} else {
if (nft_trans_chain_bound(trans)) {
nft_trans_destroy(trans);
break;
}
nft_use_dec_restore(&trans->ctx.table->use);
nft_chain_del(trans->ctx.chain);
nf_tables_unregister_hook(trans->ctx.net,
trans->ctx.table,
trans->ctx.chain);
}
break;
case NFT_MSG_DELCHAIN:
case NFT_MSG_DESTROYCHAIN:
if (nft_trans_chain_update(trans)) {
list_splice(&nft_trans_chain_hooks(trans),
&nft_trans_basechain(trans)->hook_list);
} else {
nft_use_inc_restore(&trans->ctx.table->use);
nft_clear(trans->ctx.net, trans->ctx.chain);
}
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWRULE:
if (nft_trans_rule_bound(trans)) {
nft_trans_destroy(trans);
break;
}
nft_use_dec_restore(&trans->ctx.chain->use);
list_del_rcu(&nft_trans_rule(trans)->list);
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_ABORT);
if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
nft_use_inc_restore(&trans->ctx.chain->use);
nft_clear(trans->ctx.net, nft_trans_rule(trans));
nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSET:
if (nft_trans_set_update(trans)) {
nft_trans_destroy(trans);
break;
}
nft_use_dec_restore(&trans->ctx.table->use);
if (nft_trans_set_bound(trans)) {
nft_trans_destroy(trans);
break;
}
list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
case NFT_MSG_DESTROYSET:
nft_use_inc_restore(&trans->ctx.table->use);
nft_clear(trans->ctx.net, nft_trans_set(trans));
if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
nft_map_activate(&trans->ctx, nft_trans_set(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSETELEM:
if (nft_trans_elem_set_bound(trans)) {
nft_trans_destroy(trans);
break;
}
te = (struct nft_trans_elem *)trans->data;
nft_setelem_remove(net, te->set, &te->elem);
if (!nft_setelem_is_catchall(te->set, &te->elem))
atomic_dec(&te->set->nelems);
if (te->set->ops->abort &&
list_empty(&te->set->pending_update)) {
list_add_tail(&te->set->pending_update,
&set_update_list);
}
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
te = (struct nft_trans_elem *)trans->data;
nft_setelem_data_activate(net, te->set, &te->elem);
nft_setelem_activate(net, te->set, &te->elem);
if (!nft_setelem_is_catchall(te->set, &te->elem))
te->set->ndeact--;
if (te->set->ops->abort &&
list_empty(&te->set->pending_update)) {
list_add_tail(&te->set->pending_update,
&set_update_list);
}
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWOBJ:
if (nft_trans_obj_update(trans)) {
nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
nft_trans_destroy(trans);
} else {
nft_use_dec_restore(&trans->ctx.table->use);
nft_obj_del(nft_trans_obj(trans));
}
break;
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
nft_use_inc_restore(&trans->ctx.table->use);
nft_clear(trans->ctx.net, nft_trans_obj(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable_hooks(trans));
} else {
nft_use_dec_restore(&trans->ctx.table->use);
list_del_rcu(&nft_trans_flowtable(trans)->list);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable(trans)->hook_list);
}
break;
case NFT_MSG_DELFLOWTABLE:
case NFT_MSG_DESTROYFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
list_splice(&nft_trans_flowtable_hooks(trans),
&nft_trans_flowtable(trans)->hook_list);
} else {
nft_use_inc_restore(&trans->ctx.table->use);
nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
}
nft_trans_destroy(trans);
break;
}
}
nft_set_abort_update(&set_update_list);
synchronize_rcu();
list_for_each_entry_safe_reverse(trans, next,
&nft_net->commit_list, list) {
nft_trans_list_del(trans);
nf_tables_abort_release(trans);
}
if (action == NFNL_ABORT_AUTOLOAD)
nf_tables_module_autoload(net);
else
nf_tables_module_autoload_cleanup(net);
return 0;
}
static int nf_tables_abort(struct net *net, struct sk_buff *skb,
enum nfnl_abort_action action)
{
struct nftables_pernet *nft_net = nft_pernet(net);
unsigned int gc_seq;
int ret;
gc_seq = nft_gc_seq_begin(nft_net);
ret = __nf_tables_abort(net, action);
nft_gc_seq_end(nft_net, gc_seq);
mutex_unlock(&nft_net->commit_mutex);
return ret;
}
static bool nf_tables_valid_genid(struct net *net, u32 genid)
{
struct nftables_pernet *nft_net = nft_pernet(net);
bool genid_ok;
mutex_lock(&nft_net->commit_mutex);
genid_ok = genid == 0 || nft_net->base_seq == genid;
if (!genid_ok)
mutex_unlock(&nft_net->commit_mutex);
/* else, commit mutex has to be released by commit or abort function */
return genid_ok;
}
static const struct nfnetlink_subsystem nf_tables_subsys = {
.name = "nf_tables",
.subsys_id = NFNL_SUBSYS_NFTABLES,
.cb_count = NFT_MSG_MAX,
.cb = nf_tables_cb,
.commit = nf_tables_commit,
.abort = nf_tables_abort,
.valid_genid = nf_tables_valid_genid,
.owner = THIS_MODULE,
};
int nft_chain_validate_dependency(const struct nft_chain *chain,
enum nft_chain_types type)
{
const struct nft_base_chain *basechain;
if (nft_is_base_chain(chain)) {
basechain = nft_base_chain(chain);
if (basechain->type->type != type)
return -EOPNOTSUPP;
}
return 0;
}
EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
int nft_chain_validate_hooks(const struct nft_chain *chain,
unsigned int hook_flags)
{
struct nft_base_chain *basechain;
if (nft_is_base_chain(chain)) {
basechain = nft_base_chain(chain);
if ((1 << basechain->ops.hooknum) & hook_flags)
return 0;
return -EOPNOTSUPP;
}
return 0;
}
EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
/*
* Loop detection - walk through the ruleset beginning at the destination chain
* of a new jump until either the source chain is reached (loop) or all
* reachable chains have been traversed.
*
* The loop check is performed whenever a new jump verdict is added to an
* expression or verdict map or a verdict map is bound to a new chain.
*/
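/* For example, if chain "a" already jumps to chain "b", adding a rule to
 * "b" that jumps back to "a" makes this walk reach the source chain again
 * and the update is rejected with -ELOOP.
 */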
static int nf_tables_check_loops(const struct nft_ctx *ctx,
const struct nft_chain *chain);
static int nft_check_loops(const struct nft_ctx *ctx,
const struct nft_set_ext *ext)
{
const struct nft_data *data;
int ret;
data = nft_set_ext_data(ext);
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
ret = nf_tables_check_loops(ctx, data->verdict.chain);
break;
default:
ret = 0;
break;
}
return ret;
}
static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
*nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
return 0;
return nft_check_loops(ctx, ext);
}
static int nft_set_catchall_loops(const struct nft_ctx *ctx,
struct nft_set *set)
{
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set_elem_catchall *catchall;
struct nft_set_ext *ext;
int ret = 0;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask))
continue;
ret = nft_check_loops(ctx, ext);
if (ret < 0)
return ret;
}
return ret;
}
static int nf_tables_check_loops(const struct nft_ctx *ctx,
const struct nft_chain *chain)
{
const struct nft_rule *rule;
const struct nft_expr *expr, *last;
struct nft_set *set;
struct nft_set_binding *binding;
struct nft_set_iter iter;
if (ctx->chain == chain)
return -ELOOP;
if (fatal_signal_pending(current))
return -EINTR;
list_for_each_entry(rule, &chain->rules, list) {
nft_rule_for_each_expr(expr, last, rule) {
struct nft_immediate_expr *priv;
const struct nft_data *data;
int err;
if (strcmp(expr->ops->type->name, "immediate"))
continue;
priv = nft_expr_priv(expr);
if (priv->dreg != NFT_REG_VERDICT)
continue;
data = &priv->data;
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
err = nf_tables_check_loops(ctx,
data->verdict.chain);
if (err < 0)
return err;
break;
default:
break;
}
}
}
list_for_each_entry(set, &ctx->table->sets, list) {
if (!nft_is_active_next(ctx->net, set))
continue;
if (!(set->flags & NFT_SET_MAP) ||
set->dtype != NFT_DATA_VERDICT)
continue;
list_for_each_entry(binding, &set->bindings, list) {
if (!(binding->flags & NFT_SET_MAP) ||
binding->chain != chain)
continue;
iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_loop_check_setelem;
set->ops->walk(ctx, set, &iter);
if (!iter.err)
iter.err = nft_set_catchall_loops(ctx, set);
if (iter.err < 0)
return iter.err;
}
}
return 0;
}
/**
* nft_parse_u32_check - fetch u32 attribute and check for maximum value
*
* @attr: netlink attribute to fetch value from
 * @max: maximum value allowed to be stored in dest
 * @dest: pointer to the variable
 *
 * Parse, check and store a given u32 netlink attribute into the
 * destination variable. This function returns -ERANGE if the value goes
 * over the maximum value. Otherwise 0 is returned and the attribute value
 * is stored in the destination variable.
*/
int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
{
u32 val;
val = ntohl(nla_get_be32(attr));
if (val > max)
return -ERANGE;
*dest = val;
return 0;
}
EXPORT_SYMBOL_GPL(nft_parse_u32_check);
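/* Translate a netlink register number into an index into the 32 bit
 * register array. With the register layout from the nf_tables uapi header,
 * the 128 bit registers NFT_REG_VERDICT..NFT_REG_4 map to indexes 0, 4, 8,
 * 12 and 16, while NFT_REG32_00..NFT_REG32_15 map to indexes 4..19
 * (NFT_REG32_00 aliases the start of NFT_REG_1).
 */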
static int nft_parse_register(const struct nlattr *attr, u32 *preg)
{
unsigned int reg;
reg = ntohl(nla_get_be32(attr));
switch (reg) {
case NFT_REG_VERDICT...NFT_REG_4:
*preg = reg * NFT_REG_SIZE / NFT_REG32_SIZE;
break;
case NFT_REG32_00...NFT_REG32_15:
*preg = reg + NFT_REG_SIZE / NFT_REG32_SIZE - NFT_REG32_00;
break;
default:
return -ERANGE;
}
return 0;
}
/**
* nft_dump_register - dump a register value to a netlink attribute
*
* @skb: socket buffer
* @attr: attribute number
* @reg: register number
*
 * Construct a netlink attribute containing the register number. For
 * compatibility reasons, register numbers that are a multiple of 4 are
 * translated to the corresponding 128 bit register numbers.
*/
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg)
{
if (reg % (NFT_REG_SIZE / NFT_REG32_SIZE) == 0)
reg = reg / (NFT_REG_SIZE / NFT_REG32_SIZE);
else
reg = reg - NFT_REG_SIZE / NFT_REG32_SIZE + NFT_REG32_00;
return nla_put_be32(skb, attr, htonl(reg));
}
EXPORT_SYMBOL_GPL(nft_dump_register);
static int nft_validate_register_load(enum nft_registers reg, unsigned int len)
{
if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
if (len == 0)
return -EINVAL;
if (reg * NFT_REG32_SIZE + len > sizeof_field(struct nft_regs, data))
return -ERANGE;
return 0;
}
int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len)
{
u32 reg;
int err;
err = nft_parse_register(attr, &reg);
if (err < 0)
return err;
err = nft_validate_register_load(reg, len);
if (err < 0)
return err;
*sreg = reg;
return 0;
}
EXPORT_SYMBOL_GPL(nft_parse_register_load);
static int nft_validate_register_store(const struct nft_ctx *ctx,
enum nft_registers reg,
const struct nft_data *data,
enum nft_data_types type,
unsigned int len)
{
int err;
switch (reg) {
case NFT_REG_VERDICT:
if (type != NFT_DATA_VERDICT)
return -EINVAL;
if (data != NULL &&
(data->verdict.code == NFT_GOTO ||
data->verdict.code == NFT_JUMP)) {
err = nf_tables_check_loops(ctx, data->verdict.chain);
if (err < 0)
return err;
}
return 0;
default:
if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
if (len == 0)
return -EINVAL;
if (reg * NFT_REG32_SIZE + len >
sizeof_field(struct nft_regs, data))
return -ERANGE;
if (data != NULL && type != NFT_DATA_VALUE)
return -EINVAL;
return 0;
}
}
int nft_parse_register_store(const struct nft_ctx *ctx,
const struct nlattr *attr, u8 *dreg,
const struct nft_data *data,
enum nft_data_types type, unsigned int len)
{
int err;
u32 reg;
err = nft_parse_register(attr, &reg);
if (err < 0)
return err;
err = nft_validate_register_store(ctx, reg, data, type, len);
if (err < 0)
return err;
*dreg = reg;
return 0;
}
EXPORT_SYMBOL_GPL(nft_parse_register_store);
static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
[NFTA_VERDICT_CODE] = { .type = NLA_U32 },
[NFTA_VERDICT_CHAIN] = { .type = NLA_STRING,
.len = NFT_CHAIN_MAXNAMELEN - 1 },
[NFTA_VERDICT_CHAIN_ID] = { .type = NLA_U32 },
};
static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
struct nft_data_desc *desc, const struct nlattr *nla)
{
u8 genmask = nft_genmask_next(ctx->net);
struct nlattr *tb[NFTA_VERDICT_MAX + 1];
struct nft_chain *chain;
int err;
err = nla_parse_nested_deprecated(tb, NFTA_VERDICT_MAX, nla,
nft_verdict_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_VERDICT_CODE])
return -EINVAL;
/* zero padding hole for memcmp */
memset(data, 0, sizeof(*data));
data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
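/* Codes outside the nf_tables specific verdicts must be plain netfilter
 * verdicts (NF_ACCEPT, NF_DROP or NF_QUEUE); NFT_JUMP and NFT_GOTO
 * additionally resolve the target chain and take a reference on it.
 */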
switch (data->verdict.code) {
default:
switch (data->verdict.code & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
break;
default:
return -EINVAL;
}
fallthrough;
case NFT_CONTINUE:
case NFT_BREAK:
case NFT_RETURN:
break;
case NFT_JUMP:
case NFT_GOTO:
if (tb[NFTA_VERDICT_CHAIN]) {
chain = nft_chain_lookup(ctx->net, ctx->table,
tb[NFTA_VERDICT_CHAIN],
genmask);
} else if (tb[NFTA_VERDICT_CHAIN_ID]) {
chain = nft_chain_lookup_byid(ctx->net, ctx->table,
tb[NFTA_VERDICT_CHAIN_ID],
genmask);
if (IS_ERR(chain))
return PTR_ERR(chain);
} else {
return -EINVAL;
}
if (IS_ERR(chain))
return PTR_ERR(chain);
if (nft_is_base_chain(chain))
return -EOPNOTSUPP;
if (nft_chain_is_bound(chain))
return -EINVAL;
if (desc->flags & NFT_DATA_DESC_SETELEM &&
chain->flags & NFT_CHAIN_BINDING)
return -EINVAL;
if (!nft_use_inc(&chain->use))
return -EMFILE;
data->verdict.chain = chain;
break;
}
desc->len = sizeof(data->verdict);
return 0;
}
static void nft_verdict_uninit(const struct nft_data *data)
{
struct nft_chain *chain;
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
chain = data->verdict.chain;
nft_use_dec(&chain->use);
break;
}
}
int nft_verdict_dump(struct sk_buff *skb, int type, const struct nft_verdict *v)
{
struct nlattr *nest;
nest = nla_nest_start_noflag(skb, type);
if (!nest)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(v->code)))
goto nla_put_failure;
switch (v->code) {
case NFT_JUMP:
case NFT_GOTO:
if (nla_put_string(skb, NFTA_VERDICT_CHAIN,
v->chain->name))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
return 0;
nla_put_failure:
return -1;
}
static int nft_value_init(const struct nft_ctx *ctx,
struct nft_data *data, struct nft_data_desc *desc,
const struct nlattr *nla)
{
unsigned int len;
len = nla_len(nla);
if (len == 0)
return -EINVAL;
if (len > desc->size)
return -EOVERFLOW;
if (desc->len) {
if (len != desc->len)
return -EINVAL;
} else {
desc->len = len;
}
nla_memcpy(data->data, nla, len);
return 0;
}
static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data,
unsigned int len)
{
return nla_put(skb, NFTA_DATA_VALUE, len, data->data);
}
static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
[NFTA_DATA_VALUE] = { .type = NLA_BINARY },
[NFTA_DATA_VERDICT] = { .type = NLA_NESTED },
};
/**
* nft_data_init - parse nf_tables data netlink attributes
*
* @ctx: context of the expression using the data
* @data: destination struct nft_data
* @desc: data description
* @nla: netlink attribute containing data
*
* Parse the netlink data attributes and initialize a struct nft_data.
* The type and length of data are returned in the data description.
*
* The caller can indicate that it only wants to accept data of type
* NFT_DATA_VALUE by passing NULL for the ctx argument.
*/
int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
struct nft_data_desc *desc, const struct nlattr *nla)
{
struct nlattr *tb[NFTA_DATA_MAX + 1];
int err;
if (WARN_ON_ONCE(!desc->size))
return -EINVAL;
err = nla_parse_nested_deprecated(tb, NFTA_DATA_MAX, nla,
nft_data_policy, NULL);
if (err < 0)
return err;
if (tb[NFTA_DATA_VALUE]) {
if (desc->type != NFT_DATA_VALUE)
return -EINVAL;
err = nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]);
} else if (tb[NFTA_DATA_VERDICT] && ctx != NULL) {
if (desc->type != NFT_DATA_VERDICT)
return -EINVAL;
err = nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
} else {
err = -EINVAL;
}
return err;
}
EXPORT_SYMBOL_GPL(nft_data_init);
/**
* nft_data_release - release a nft_data item
*
* @data: struct nft_data to release
* @type: type of data
*
* Release a nft_data item. NFT_DATA_VALUE types can be silently discarded,
* all others need to be released by calling this function.
*/
void nft_data_release(const struct nft_data *data, enum nft_data_types type)
{
if (type < NFT_DATA_VERDICT)
return;
switch (type) {
case NFT_DATA_VERDICT:
return nft_verdict_uninit(data);
default:
WARN_ON(1);
}
}
EXPORT_SYMBOL_GPL(nft_data_release);
int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
enum nft_data_types type, unsigned int len)
{
struct nlattr *nest;
int err;
nest = nla_nest_start_noflag(skb, attr);
if (nest == NULL)
return -1;
switch (type) {
case NFT_DATA_VALUE:
err = nft_value_dump(skb, data, len);
break;
case NFT_DATA_VERDICT:
err = nft_verdict_dump(skb, NFTA_DATA_VERDICT, &data->verdict);
break;
default:
err = -EINVAL;
WARN_ON(1);
}
nla_nest_end(skb, nest);
return err;
}
EXPORT_SYMBOL_GPL(nft_data_dump);
int __nft_release_basechain(struct nft_ctx *ctx)
{
struct nft_rule *rule, *nr;
if (WARN_ON(!nft_is_base_chain(ctx->chain)))
return 0;
nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
list_del(&rule->list);
nft_use_dec(&ctx->chain->use);
nf_tables_rule_release(ctx, rule);
}
nft_chain_del(ctx->chain);
nft_use_dec(&ctx->table->use);
nf_tables_chain_destroy(ctx);
return 0;
}
EXPORT_SYMBOL_GPL(__nft_release_basechain);
static void __nft_release_hook(struct net *net, struct nft_table *table)
{
struct nft_flowtable *flowtable;
struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list)
__nf_tables_unregister_hook(net, table, chain, true);
list_for_each_entry(flowtable, &table->flowtables, list)
__nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
true);
}
static void __nft_release_hooks(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_table *table;
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table))
continue;
__nft_release_hook(net, table);
}
}
static void __nft_release_table(struct net *net, struct nft_table *table)
{
struct nft_flowtable *flowtable, *nf;
struct nft_chain *chain, *nc;
struct nft_object *obj, *ne;
struct nft_rule *rule, *nr;
struct nft_set *set, *ns;
struct nft_ctx ctx = {
.net = net,
.family = NFPROTO_NETDEV,
};
ctx.family = table->family;
ctx.table = table;
list_for_each_entry(chain, &table->chains, list) {
if (nft_chain_binding(chain))
continue;
ctx.chain = chain;
list_for_each_entry_safe(rule, nr, &chain->rules, list) {
list_del(&rule->list);
nft_use_dec(&chain->use);
nf_tables_rule_release(&ctx, rule);
}
}
list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
list_del(&flowtable->list);
nft_use_dec(&table->use);
nf_tables_flowtable_destroy(flowtable);
}
list_for_each_entry_safe(set, ns, &table->sets, list) {
list_del(&set->list);
nft_use_dec(&table->use);
if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
nft_map_deactivate(&ctx, set);
nft_set_destroy(&ctx, set);
}
list_for_each_entry_safe(obj, ne, &table->objects, list) {
nft_obj_del(obj);
nft_use_dec(&table->use);
nft_obj_destroy(&ctx, obj);
}
list_for_each_entry_safe(chain, nc, &table->chains, list) {
ctx.chain = chain;
nft_chain_del(chain);
nft_use_dec(&table->use);
nf_tables_chain_destroy(&ctx);
}
nf_tables_table_destroy(&ctx);
}
static void __nft_release_tables(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_table *table, *nt;
list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
if (nft_table_has_owner(table))
continue;
list_del(&table->list);
__nft_release_table(net, table);
}
}
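/* NETLINK_URELEASE notifier: when the netfilter netlink socket of a
 * process that owns tables goes away, tear those tables down. Up to eight
 * tables are collected per pass, released after an RCU grace period, and
 * the walk restarts if the batch filled up.
 */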
static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct nft_table *table, *to_delete[8];
struct nftables_pernet *nft_net;
struct netlink_notify *n = ptr;
struct net *net = n->net;
unsigned int deleted;
bool restart = false;
unsigned int gc_seq;
if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
return NOTIFY_DONE;
nft_net = nft_pernet(net);
deleted = 0;
mutex_lock(&nft_net->commit_mutex);
gc_seq = nft_gc_seq_begin(nft_net);
if (!list_empty(&nf_tables_destroy_list))
nf_tables_trans_destroy_flush_work();
again:
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table) &&
n->portid == table->nlpid) {
__nft_release_hook(net, table);
list_del_rcu(&table->list);
to_delete[deleted++] = table;
if (deleted >= ARRAY_SIZE(to_delete))
break;
}
}
if (deleted) {
restart = deleted >= ARRAY_SIZE(to_delete);
synchronize_rcu();
while (deleted)
__nft_release_table(net, to_delete[--deleted]);
if (restart)
goto again;
}
nft_gc_seq_end(nft_net, gc_seq);
mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
static struct notifier_block nft_nl_notifier = {
.notifier_call = nft_rcv_nl_event,
};
static int __net_init nf_tables_init_net(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
INIT_LIST_HEAD(&nft_net->tables);
INIT_LIST_HEAD(&nft_net->commit_list);
INIT_LIST_HEAD(&nft_net->binding_list);
INIT_LIST_HEAD(&nft_net->module_list);
INIT_LIST_HEAD(&nft_net->notify_list);
mutex_init(&nft_net->commit_mutex);
nft_net->base_seq = 1;
nft_net->gc_seq = 0;
nft_net->validate_state = NFT_VALIDATE_SKIP;
return 0;
}
static void __net_exit nf_tables_pre_exit_net(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
mutex_lock(&nft_net->commit_mutex);
__nft_release_hooks(net);
mutex_unlock(&nft_net->commit_mutex);
}
static void __net_exit nf_tables_exit_net(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
unsigned int gc_seq;
mutex_lock(&nft_net->commit_mutex);
gc_seq = nft_gc_seq_begin(nft_net);
if (!list_empty(&nft_net->commit_list) ||
!list_empty(&nft_net->module_list))
__nf_tables_abort(net, NFNL_ABORT_NONE);
__nft_release_tables(net);
nft_gc_seq_end(nft_net, gc_seq);
mutex_unlock(&nft_net->commit_mutex);
WARN_ON_ONCE(!list_empty(&nft_net->tables));
WARN_ON_ONCE(!list_empty(&nft_net->module_list));
WARN_ON_ONCE(!list_empty(&nft_net->notify_list));
}
static void nf_tables_exit_batch(struct list_head *net_exit_list)
{
flush_work(&trans_gc_work);
}
static struct pernet_operations nf_tables_net_ops = {
.init = nf_tables_init_net,
.pre_exit = nf_tables_pre_exit_net,
.exit = nf_tables_exit_net,
.exit_batch = nf_tables_exit_batch,
.id = &nf_tables_net_id,
.size = sizeof(struct nftables_pernet),
};
static int __init nf_tables_module_init(void)
{
int err;
err = register_pernet_subsys(&nf_tables_net_ops);
if (err < 0)
return err;
err = nft_chain_filter_init();
if (err < 0)
goto err_chain_filter;
err = nf_tables_core_module_init();
if (err < 0)
goto err_core_module;
err = register_netdevice_notifier(&nf_tables_flowtable_notifier);
if (err < 0)
goto err_netdev_notifier;
err = rhltable_init(&nft_objname_ht, &nft_objname_ht_params);
if (err < 0)
goto err_rht_objname;
err = nft_offload_init();
if (err < 0)
goto err_offload;
err = netlink_register_notifier(&nft_nl_notifier);
if (err < 0)
goto err_netlink_notifier;
/* must be last */
err = nfnetlink_subsys_register(&nf_tables_subsys);
if (err < 0)
goto err_nfnl_subsys;
nft_chain_route_init();
return err;
err_nfnl_subsys:
netlink_unregister_notifier(&nft_nl_notifier);
err_netlink_notifier:
nft_offload_exit();
err_offload:
rhltable_destroy(&nft_objname_ht);
err_rht_objname:
unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
err_netdev_notifier:
nf_tables_core_module_exit();
err_core_module:
nft_chain_filter_fini();
err_chain_filter:
unregister_pernet_subsys(&nf_tables_net_ops);
return err;
}
static void __exit nf_tables_module_exit(void)
{
nfnetlink_subsys_unregister(&nf_tables_subsys);
netlink_unregister_notifier(&nft_nl_notifier);
nft_offload_exit();
unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
nft_chain_filter_fini();
nft_chain_route_fini();
unregister_pernet_subsys(&nf_tables_net_ops);
cancel_work_sync(&trans_gc_work);
cancel_work_sync(&trans_destroy_work);
rcu_barrier();
rhltable_destroy(&nft_objname_ht);
nf_tables_core_module_exit();
}
module_init(nf_tables_module_init);
module_exit(nf_tables_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
| linux-master | net/netfilter/nf_tables_api.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* IRC extension for IP connection tracking, Version 1.21
* (C) 2000-2002 by Harald Welte <[email protected]>
* based on RR's ip_conntrack_ftp.c
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_irc.h>
#define MAX_PORTS 8
static unsigned short ports[MAX_PORTS];
static unsigned int ports_c;
static unsigned int max_dcc_channels = 8;
static unsigned int dcc_timeout __read_mostly = 300;
/* This is slow, but it's simple. --RR */
static char *irc_buffer;
static DEFINE_SPINLOCK(irc_buffer_lock);
unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_irc_hook);
#define HELPER_NAME "irc"
#define MAX_SEARCH_SIZE 4095
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_irc");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
module_param_array(ports, ushort, &ports_c, 0400);
MODULE_PARM_DESC(ports, "port numbers of IRC servers");
module_param(max_dcc_channels, uint, 0400);
MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per "
"IRC session");
module_param(dcc_timeout, uint, 0400);
MODULE_PARM_DESC(dcc_timeout, "timeout (in seconds) for unestablished DCC channels");
static const char *const dccprotos[] = {
"SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
};
#define MINMATCHLEN 5
/* tries to get the ip_addr and port out of a dcc command
* return value: -1 on failure, 0 on success
* data pointer to first byte of DCC command data
* data_end pointer to last byte of dcc command data
* ip returns parsed ip of dcc command
* port returns parsed port of dcc command
* ad_beg_p returns pointer to first byte of addr data
* ad_end_p returns pointer to last byte of addr data
*/
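/* For example, in "DCC SEND file 3232235522 6667\1\n" the address data is
 * "3232235522 6667", i.e. 192.168.0.2 port 6667; *ad_beg_p and *ad_end_p
 * delimit that substring so a NAT helper can rewrite it in place.
 */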
static int parse_dcc(char *data, const char *data_end, __be32 *ip,
u_int16_t *port, char **ad_beg_p, char **ad_end_p)
{
char *tmp;
/* at least 12: "AAAAAAAA P\1\n" */
while (*data++ != ' ')
if (data > data_end - 12)
return -1;
/* Make sure we have a newline character within the packet boundaries
* because simple_strtoul parses until the first invalid character. */
for (tmp = data; tmp <= data_end; tmp++)
if (*tmp == '\n')
break;
if (tmp > data_end || *tmp != '\n')
return -1;
*ad_beg_p = data;
*ip = cpu_to_be32(simple_strtoul(data, &data, 10));
/* skip blanks between ip and port */
while (*data == ' ') {
if (data >= data_end)
return -1;
data++;
}
*port = simple_strtoul(data, &data, 10);
*ad_end_p = data;
return 0;
}
static int help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
unsigned int dataoff;
const struct iphdr *iph;
const struct tcphdr *th;
struct tcphdr _tcph;
const char *data_limit;
char *data, *ib_ptr;
int dir = CTINFO2DIR(ctinfo);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
__be32 dcc_ip;
u_int16_t dcc_port;
__be16 port;
int i, ret = NF_ACCEPT;
char *addr_beg_p, *addr_end_p;
typeof(nf_nat_irc_hook) nf_nat_irc;
unsigned int datalen;
/* If packet is coming from IRC server */
if (dir == IP_CT_DIR_REPLY)
return NF_ACCEPT;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* Not a full tcp header? */
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return NF_ACCEPT;
/* No data? */
dataoff = protoff + th->doff*4;
if (dataoff >= skb->len)
return NF_ACCEPT;
datalen = skb->len - dataoff;
if (datalen > MAX_SEARCH_SIZE)
datalen = MAX_SEARCH_SIZE;
spin_lock_bh(&irc_buffer_lock);
ib_ptr = skb_header_pointer(skb, dataoff, datalen,
irc_buffer);
if (!ib_ptr) {
spin_unlock_bh(&irc_buffer_lock);
return NF_ACCEPT;
}
data = ib_ptr;
data_limit = ib_ptr + datalen;
/* Skip any whitespace */
while (data < data_limit - 10) {
if (*data == ' ' || *data == '\r' || *data == '\n')
data++;
else
break;
}
/* strlen("PRIVMSG x ")=10 */
if (data < data_limit - 10) {
if (strncasecmp("PRIVMSG ", data, 8))
goto out;
data += 8;
}
/* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
* 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
*/
while (data < data_limit - (21 + MINMATCHLEN)) {
/* Find first " :", the start of message */
if (memcmp(data, " :", 2)) {
data++;
continue;
}
data += 2;
/* then check that place only for the DCC command */
if (memcmp(data, "\1DCC ", 5))
goto out;
data += 5;
/* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
iph = ip_hdr(skb);
pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
&iph->saddr, ntohs(th->source),
&iph->daddr, ntohs(th->dest));
for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
/* no match */
continue;
}
data += strlen(dccprotos[i]);
pr_debug("DCC %s detected\n", dccprotos[i]);
/* we have at least
* (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
* data left (== 14/13 bytes) */
if (parse_dcc(data, data_limit, &dcc_ip,
&dcc_port, &addr_beg_p, &addr_end_p)) {
pr_debug("unable to parse dcc command\n");
continue;
}
pr_debug("DCC bound ip/port: %pI4:%u\n",
&dcc_ip, dcc_port);
/* dcc_ip can be the internal OR external (NAT'ed) IP */
tuple = &ct->tuplehash[dir].tuple;
if ((tuple->src.u3.ip != dcc_ip &&
ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
dcc_port == 0) {
net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
&tuple->src.u3.ip,
&dcc_ip, dcc_port);
continue;
}
exp = nf_ct_expect_alloc(ct);
if (exp == NULL) {
nf_ct_helper_log(skb, ct,
"cannot alloc expectation");
ret = NF_DROP;
goto out;
}
tuple = &ct->tuplehash[!dir].tuple;
port = htons(dcc_port);
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
tuple->src.l3num,
NULL, &tuple->dst.u3,
IPPROTO_TCP, NULL, &port);
nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
if (nf_nat_irc && ct->status & IPS_NAT_MASK)
ret = nf_nat_irc(skb, ctinfo, protoff,
addr_beg_p - ib_ptr,
addr_end_p - addr_beg_p,
exp);
else if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct,
"cannot add expectation");
ret = NF_DROP;
}
nf_ct_expect_put(exp);
goto out;
}
}
out:
spin_unlock_bh(&irc_buffer_lock);
return ret;
}
static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly;
static struct nf_conntrack_expect_policy irc_exp_policy;
static int __init nf_conntrack_irc_init(void)
{
int i, ret;
if (max_dcc_channels < 1) {
pr_err("max_dcc_channels must not be zero\n");
return -EINVAL;
}
if (max_dcc_channels > NF_CT_EXPECT_MAX_CNT) {
pr_err("max_dcc_channels must not be more than %u\n",
NF_CT_EXPECT_MAX_CNT);
return -EINVAL;
}
irc_exp_policy.max_expected = max_dcc_channels;
irc_exp_policy.timeout = dcc_timeout;
irc_buffer = kmalloc(MAX_SEARCH_SIZE + 1, GFP_KERNEL);
if (!irc_buffer)
return -ENOMEM;
/* If no port given, default to standard irc port */
if (ports_c == 0)
ports[ports_c++] = IRC_PORT;
for (i = 0; i < ports_c; i++) {
nf_ct_helper_init(&irc[i], AF_INET, IPPROTO_TCP, HELPER_NAME,
IRC_PORT, ports[i], i, &irc_exp_policy,
0, help, NULL, THIS_MODULE);
}
ret = nf_conntrack_helpers_register(&irc[0], ports_c);
if (ret) {
pr_err("failed to register helpers\n");
kfree(irc_buffer);
return ret;
}
return 0;
}
static void __exit nf_conntrack_irc_fini(void)
{
nf_conntrack_helpers_unregister(irc, ports_c);
kfree(irc_buffer);
}
module_init(nf_conntrack_irc_init);
module_exit(nf_conntrack_irc_fini);
| linux-master | net/netfilter/nf_conntrack_irc.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/netlink.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_synproxy.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/netfilter/nf_synproxy.h>
struct nft_synproxy {
struct nf_synproxy_info info;
};
static const struct nla_policy nft_synproxy_policy[NFTA_SYNPROXY_MAX + 1] = {
[NFTA_SYNPROXY_MSS] = { .type = NLA_U16 },
[NFTA_SYNPROXY_WSCALE] = { .type = NLA_U8 },
[NFTA_SYNPROXY_FLAGS] = { .type = NLA_U32 },
};
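/* Build the TCP options announced in the spoofed SYN-ACK: keep only the
 * options this expression was configured with, save the client's MSS in
 * mss_encode (later folded into the cookie) and advertise the configured
 * MSS instead; when timestamps are negotiated the timestamp cookie is
 * initialized, otherwise window scale, SACK and ECN are dropped since they
 * cannot be carried.
 */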
static void nft_synproxy_tcp_options(struct synproxy_options *opts,
const struct tcphdr *tcp,
struct synproxy_net *snet,
struct nf_synproxy_info *info,
const struct nft_synproxy *priv)
{
this_cpu_inc(snet->stats->syn_received);
if (tcp->ece && tcp->cwr)
opts->options |= NF_SYNPROXY_OPT_ECN;
opts->options &= priv->info.options;
opts->mss_encode = opts->mss_option;
opts->mss_option = info->mss;
if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP)
synproxy_init_timestamp_cookie(info, opts);
else
opts->options &= ~(NF_SYNPROXY_OPT_WSCALE |
NF_SYNPROXY_OPT_SACK_PERM |
NF_SYNPROXY_OPT_ECN);
}
static void nft_synproxy_eval_v4(const struct nft_synproxy *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt,
const struct tcphdr *tcp,
struct tcphdr *_tcph,
struct synproxy_options *opts)
{
struct nf_synproxy_info info = priv->info;
struct net *net = nft_net(pkt);
struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *skb = pkt->skb;
if (tcp->syn) {
/* Initial SYN from client */
nft_synproxy_tcp_options(opts, tcp, snet, &info, priv);
synproxy_send_client_synack(net, skb, tcp, opts);
consume_skb(skb);
regs->verdict.code = NF_STOLEN;
} else if (tcp->ack) {
/* ACK from client */
if (synproxy_recv_client_ack(net, skb, tcp, opts,
ntohl(tcp->seq))) {
consume_skb(skb);
regs->verdict.code = NF_STOLEN;
} else {
regs->verdict.code = NF_DROP;
}
}
}
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
static void nft_synproxy_eval_v6(const struct nft_synproxy *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt,
const struct tcphdr *tcp,
struct tcphdr *_tcph,
struct synproxy_options *opts)
{
struct nf_synproxy_info info = priv->info;
struct net *net = nft_net(pkt);
struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *skb = pkt->skb;
if (tcp->syn) {
/* Initial SYN from client */
nft_synproxy_tcp_options(opts, tcp, snet, &info, priv);
synproxy_send_client_synack_ipv6(net, skb, tcp, opts);
consume_skb(skb);
regs->verdict.code = NF_STOLEN;
} else if (tcp->ack) {
/* ACK from client */
if (synproxy_recv_client_ack_ipv6(net, skb, tcp, opts,
ntohl(tcp->seq))) {
consume_skb(skb);
regs->verdict.code = NF_STOLEN;
} else {
regs->verdict.code = NF_DROP;
}
}
}
#endif /* CONFIG_NF_TABLES_IPV6*/
static void nft_synproxy_do_eval(const struct nft_synproxy *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct synproxy_options opts = {};
struct sk_buff *skb = pkt->skb;
int thoff = nft_thoff(pkt);
const struct tcphdr *tcp;
struct tcphdr _tcph;
if (pkt->tprot != IPPROTO_TCP) {
regs->verdict.code = NFT_BREAK;
return;
}
if (nf_ip_checksum(skb, nft_hook(pkt), thoff, IPPROTO_TCP)) {
regs->verdict.code = NF_DROP;
return;
}
tcp = skb_header_pointer(skb, thoff,
sizeof(struct tcphdr),
&_tcph);
if (!tcp) {
regs->verdict.code = NF_DROP;
return;
}
if (!synproxy_parse_options(skb, thoff, tcp, &opts)) {
regs->verdict.code = NF_DROP;
return;
}
switch (skb->protocol) {
case htons(ETH_P_IP):
nft_synproxy_eval_v4(priv, regs, pkt, tcp, &_tcph, &opts);
return;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case htons(ETH_P_IPV6):
nft_synproxy_eval_v6(priv, regs, pkt, tcp, &_tcph, &opts);
return;
#endif
}
regs->verdict.code = NFT_BREAK;
}
static int nft_synproxy_do_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_synproxy *priv)
{
struct synproxy_net *snet = synproxy_pernet(ctx->net);
u32 flags;
int err;
if (tb[NFTA_SYNPROXY_MSS])
priv->info.mss = ntohs(nla_get_be16(tb[NFTA_SYNPROXY_MSS]));
if (tb[NFTA_SYNPROXY_WSCALE])
priv->info.wscale = nla_get_u8(tb[NFTA_SYNPROXY_WSCALE]);
if (tb[NFTA_SYNPROXY_FLAGS]) {
flags = ntohl(nla_get_be32(tb[NFTA_SYNPROXY_FLAGS]));
if (flags & ~NF_SYNPROXY_OPT_MASK)
return -EOPNOTSUPP;
priv->info.options = flags;
}
err = nf_ct_netns_get(ctx->net, ctx->family);
if (err)
return err;
switch (ctx->family) {
case NFPROTO_IPV4:
err = nf_synproxy_ipv4_init(snet, ctx->net);
if (err)
goto nf_ct_failure;
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
err = nf_synproxy_ipv6_init(snet, ctx->net);
if (err)
goto nf_ct_failure;
break;
#endif
case NFPROTO_INET:
case NFPROTO_BRIDGE:
err = nf_synproxy_ipv4_init(snet, ctx->net);
if (err)
goto nf_ct_failure;
err = nf_synproxy_ipv6_init(snet, ctx->net);
if (err) {
nf_synproxy_ipv4_fini(snet, ctx->net);
goto nf_ct_failure;
}
break;
}
return 0;
nf_ct_failure:
nf_ct_netns_put(ctx->net, ctx->family);
return err;
}
static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
{
struct synproxy_net *snet = synproxy_pernet(ctx->net);
switch (ctx->family) {
case NFPROTO_IPV4:
nf_synproxy_ipv4_fini(snet, ctx->net);
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
nf_synproxy_ipv6_fini(snet, ctx->net);
break;
#endif
case NFPROTO_INET:
case NFPROTO_BRIDGE:
nf_synproxy_ipv4_fini(snet, ctx->net);
nf_synproxy_ipv6_fini(snet, ctx->net);
break;
}
nf_ct_netns_put(ctx->net, ctx->family);
}
static int nft_synproxy_do_dump(struct sk_buff *skb, struct nft_synproxy *priv)
{
if (nla_put_be16(skb, NFTA_SYNPROXY_MSS, htons(priv->info.mss)) ||
nla_put_u8(skb, NFTA_SYNPROXY_WSCALE, priv->info.wscale) ||
nla_put_be32(skb, NFTA_SYNPROXY_FLAGS, htonl(priv->info.options)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static void nft_synproxy_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_synproxy *priv = nft_expr_priv(expr);
nft_synproxy_do_eval(priv, regs, pkt);
}
static int nft_synproxy_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD));
}
static int nft_synproxy_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_synproxy *priv = nft_expr_priv(expr);
return nft_synproxy_do_init(ctx, tb, priv);
}
static void nft_synproxy_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
nft_synproxy_do_destroy(ctx);
}
static int nft_synproxy_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_synproxy *priv = nft_expr_priv(expr);
return nft_synproxy_do_dump(skb, priv);
}
static struct nft_expr_type nft_synproxy_type;
static const struct nft_expr_ops nft_synproxy_ops = {
.eval = nft_synproxy_eval,
.size = NFT_EXPR_SIZE(sizeof(struct nft_synproxy)),
.init = nft_synproxy_init,
.destroy = nft_synproxy_destroy,
.dump = nft_synproxy_dump,
.type = &nft_synproxy_type,
.validate = nft_synproxy_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_synproxy_type __read_mostly = {
.ops = &nft_synproxy_ops,
.name = "synproxy",
.owner = THIS_MODULE,
.policy = nft_synproxy_policy,
.maxattr = NFTA_SYNPROXY_MAX,
};
static int nft_synproxy_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_synproxy *priv = nft_obj_data(obj);
return nft_synproxy_do_init(ctx, tb, priv);
}
static void nft_synproxy_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
nft_synproxy_do_destroy(ctx);
}
static int nft_synproxy_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
struct nft_synproxy *priv = nft_obj_data(obj);
return nft_synproxy_do_dump(skb, priv);
}
static void nft_synproxy_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_synproxy *priv = nft_obj_data(obj);
nft_synproxy_do_eval(priv, regs, pkt);
}
static void nft_synproxy_obj_update(struct nft_object *obj,
struct nft_object *newobj)
{
struct nft_synproxy *newpriv = nft_obj_data(newobj);
struct nft_synproxy *priv = nft_obj_data(obj);
priv->info = newpriv->info;
}
static struct nft_object_type nft_synproxy_obj_type;
static const struct nft_object_ops nft_synproxy_obj_ops = {
.type = &nft_synproxy_obj_type,
.size = sizeof(struct nft_synproxy),
.init = nft_synproxy_obj_init,
.destroy = nft_synproxy_obj_destroy,
.dump = nft_synproxy_obj_dump,
.eval = nft_synproxy_obj_eval,
.update = nft_synproxy_obj_update,
};
static struct nft_object_type nft_synproxy_obj_type __read_mostly = {
.type = NFT_OBJECT_SYNPROXY,
.ops = &nft_synproxy_obj_ops,
.maxattr = NFTA_SYNPROXY_MAX,
.policy = nft_synproxy_policy,
.owner = THIS_MODULE,
};
static int __init nft_synproxy_module_init(void)
{
int err;
err = nft_register_obj(&nft_synproxy_obj_type);
if (err < 0)
return err;
err = nft_register_expr(&nft_synproxy_type);
if (err < 0)
goto err;
return 0;
err:
nft_unregister_obj(&nft_synproxy_obj_type);
return err;
}
static void __exit nft_synproxy_module_exit(void)
{
nft_unregister_expr(&nft_synproxy_type);
nft_unregister_obj(&nft_synproxy_obj_type);
}
module_init(nft_synproxy_module_init);
module_exit(nft_synproxy_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fernando Fernandez <[email protected]>");
MODULE_ALIAS_NFT_EXPR("synproxy");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY);
MODULE_DESCRIPTION("nftables SYNPROXY expression support");
| linux-master | net/netfilter/nft_synproxy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/sctp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_sctp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kiran Kumar Immidi");
MODULE_DESCRIPTION("Xtables: SCTP protocol packet match");
MODULE_ALIAS("ipt_sctp");
MODULE_ALIAS("ip6t_sctp");
#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
|| (!!((invflag) & (option)) ^ (cond)))
static bool
match_flags(const struct xt_sctp_flag_info *flag_info,
const int flag_count,
u_int8_t chunktype,
u_int8_t chunkflags)
{
int i;
for (i = 0; i < flag_count; i++)
if (flag_info[i].chunktype == chunktype)
return (chunkflags & flag_info[i].flag_mask) == flag_info[i].flag;
return true;
}
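/* Walk all chunks of the SCTP packet: SCTP_CHUNK_MATCH_ANY matches as soon
 * as one selected chunk type with matching flags is found,
 * SCTP_CHUNK_MATCH_ALL requires every selected type to be present, and
 * SCTP_CHUNK_MATCH_ONLY additionally fails if any non-selected chunk type
 * appears.
 */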
static inline bool
match_packet(const struct sk_buff *skb,
unsigned int offset,
const struct xt_sctp_info *info,
bool *hotdrop)
{
u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
const struct sctp_chunkhdr *sch;
struct sctp_chunkhdr _sch;
int chunk_match_type = info->chunk_match_type;
const struct xt_sctp_flag_info *flag_info = info->flag_info;
int flag_count = info->flag_count;
#ifdef DEBUG
int i = 0;
#endif
if (chunk_match_type == SCTP_CHUNK_MATCH_ALL)
SCTP_CHUNKMAP_COPY(chunkmapcopy, info->chunkmap);
do {
sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
if (sch == NULL || sch->length == 0) {
pr_debug("Dropping invalid SCTP packet.\n");
*hotdrop = true;
return false;
}
#ifdef DEBUG
pr_debug("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d"
"\tflags: %x\n",
++i, offset, sch->type, ntohs(sch->length),
sch->flags);
#endif
offset += SCTP_PAD4(ntohs(sch->length));
pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) {
switch (chunk_match_type) {
case SCTP_CHUNK_MATCH_ANY:
if (match_flags(flag_info, flag_count,
sch->type, sch->flags)) {
return true;
}
break;
case SCTP_CHUNK_MATCH_ALL:
if (match_flags(flag_info, flag_count,
sch->type, sch->flags))
SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type);
break;
case SCTP_CHUNK_MATCH_ONLY:
if (!match_flags(flag_info, flag_count,
sch->type, sch->flags))
return false;
break;
}
} else {
switch (chunk_match_type) {
case SCTP_CHUNK_MATCH_ONLY:
return false;
}
}
} while (offset < skb->len);
switch (chunk_match_type) {
case SCTP_CHUNK_MATCH_ALL:
return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
case SCTP_CHUNK_MATCH_ANY:
return false;
case SCTP_CHUNK_MATCH_ONLY:
return true;
}
/* This will never be reached, but required to stop compiler whine */
return false;
}
static bool
sctp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_sctp_info *info = par->matchinfo;
const struct sctphdr *sh;
struct sctphdr _sh;
if (par->fragoff != 0) {
pr_debug("Dropping non-first fragment.. FIXME\n");
return false;
}
sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh);
if (sh == NULL) {
pr_debug("Dropping evil TCP offset=0 tinygram.\n");
par->hotdrop = true;
return false;
}
pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
return SCCHECK(ntohs(sh->source) >= info->spts[0]
&& ntohs(sh->source) <= info->spts[1],
XT_SCTP_SRC_PORTS, info->flags, info->invflags) &&
SCCHECK(ntohs(sh->dest) >= info->dpts[0]
&& ntohs(sh->dest) <= info->dpts[1],
XT_SCTP_DEST_PORTS, info->flags, info->invflags) &&
SCCHECK(match_packet(skb, par->thoff + sizeof(_sh),
info, &par->hotdrop),
XT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
}
static int sctp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_sctp_info *info = par->matchinfo;
if (info->flag_count > ARRAY_SIZE(info->flag_info))
return -EINVAL;
if (info->flags & ~XT_SCTP_VALID_FLAGS)
return -EINVAL;
if (info->invflags & ~XT_SCTP_VALID_FLAGS)
return -EINVAL;
if (info->invflags & ~info->flags)
return -EINVAL;
if (!(info->flags & XT_SCTP_CHUNK_TYPES))
return 0;
if (info->chunk_match_type & (SCTP_CHUNK_MATCH_ALL |
SCTP_CHUNK_MATCH_ANY | SCTP_CHUNK_MATCH_ONLY))
return 0;
return -EINVAL;
}
static struct xt_match sctp_mt_reg[] __read_mostly = {
{
.name = "sctp",
.family = NFPROTO_IPV4,
.checkentry = sctp_mt_check,
.match = sctp_mt,
.matchsize = sizeof(struct xt_sctp_info),
.proto = IPPROTO_SCTP,
.me = THIS_MODULE
},
{
.name = "sctp",
.family = NFPROTO_IPV6,
.checkentry = sctp_mt_check,
.match = sctp_mt,
.matchsize = sizeof(struct xt_sctp_info),
.proto = IPPROTO_SCTP,
.me = THIS_MODULE
},
};
static int __init sctp_mt_init(void)
{
return xt_register_matches(sctp_mt_reg, ARRAY_SIZE(sctp_mt_reg));
}
static void __exit sctp_mt_exit(void)
{
xt_unregister_matches(sctp_mt_reg, ARRAY_SIZE(sctp_mt_reg));
}
module_init(sctp_mt_init);
module_exit(sctp_mt_exit);
| linux-master | net/netfilter/xt_sctp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Anders K. Pedersen <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/dst.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
struct nft_rt {
enum nft_rt_keys key:8;
u8 dreg;
};
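/* Clamp value for NFT_RT_TCPMSS: take the smaller of the egress route MTU
 * and the MTU of a route back to the packet's source, subtract the
 * IP/IPv6 plus TCP header overhead, and fall back to TCP_MSS_DEFAULT when
 * that leaves nothing sensible.
 */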
static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst)
{
u32 minlen = sizeof(struct ipv6hdr), mtu = dst_mtu(skbdst);
const struct sk_buff *skb = pkt->skb;
struct dst_entry *dst = NULL;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
fl.u.ip4.daddr = ip_hdr(skb)->saddr;
minlen = sizeof(struct iphdr) + sizeof(struct tcphdr);
break;
case NFPROTO_IPV6:
fl.u.ip6.daddr = ipv6_hdr(skb)->saddr;
minlen = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
break;
}
nf_route(nft_net(pkt), &dst, &fl, false, nft_pf(pkt));
if (dst) {
mtu = min(mtu, dst_mtu(dst));
dst_release(dst);
}
if (mtu <= minlen || mtu > 0xffff)
return TCP_MSS_DEFAULT;
return mtu - minlen;
}
void nft_rt_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_rt *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
u32 *dest = &regs->data[priv->dreg];
const struct dst_entry *dst;
dst = skb_dst(skb);
if (!dst)
goto err;
switch (priv->key) {
#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_RT_CLASSID:
*dest = dst->tclassid;
break;
#endif
case NFT_RT_NEXTHOP4:
if (nft_pf(pkt) != NFPROTO_IPV4)
goto err;
*dest = (__force u32)rt_nexthop((const struct rtable *)dst,
ip_hdr(skb)->daddr);
break;
case NFT_RT_NEXTHOP6:
if (nft_pf(pkt) != NFPROTO_IPV6)
goto err;
memcpy(dest, rt6_nexthop((struct rt6_info *)dst,
&ipv6_hdr(skb)->daddr),
sizeof(struct in6_addr));
break;
case NFT_RT_TCPMSS:
nft_reg_store16(dest, get_tcpmss(pkt, dst));
break;
#ifdef CONFIG_XFRM
case NFT_RT_XFRM:
nft_reg_store8(dest, !!dst->xfrm);
break;
#endif
default:
WARN_ON(1);
goto err;
}
return;
err:
regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_rt_policy[NFTA_RT_MAX + 1] = {
[NFTA_RT_DREG] = { .type = NLA_U32 },
[NFTA_RT_KEY] = NLA_POLICY_MAX(NLA_BE32, 255),
};
static int nft_rt_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_rt *priv = nft_expr_priv(expr);
unsigned int len;
if (tb[NFTA_RT_KEY] == NULL ||
tb[NFTA_RT_DREG] == NULL)
return -EINVAL;
priv->key = ntohl(nla_get_be32(tb[NFTA_RT_KEY]));
switch (priv->key) {
#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_RT_CLASSID:
#endif
case NFT_RT_NEXTHOP4:
len = sizeof(u32);
break;
case NFT_RT_NEXTHOP6:
len = sizeof(struct in6_addr);
break;
case NFT_RT_TCPMSS:
len = sizeof(u16);
break;
#ifdef CONFIG_XFRM
case NFT_RT_XFRM:
len = sizeof(u8);
break;
#endif
default:
return -EOPNOTSUPP;
}
return nft_parse_register_store(ctx, tb[NFTA_RT_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, len);
}
static int nft_rt_get_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_rt *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_RT_KEY, htonl(priv->key)))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_RT_DREG, priv->dreg))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nft_data **data)
{
const struct nft_rt *priv = nft_expr_priv(expr);
unsigned int hooks;
switch (priv->key) {
case NFT_RT_NEXTHOP4:
case NFT_RT_NEXTHOP6:
case NFT_RT_CLASSID:
case NFT_RT_XFRM:
return 0;
case NFT_RT_TCPMSS:
hooks = (1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING);
break;
default:
return -EINVAL;
}
return nft_chain_validate_hooks(ctx->chain, hooks);
}
static const struct nft_expr_ops nft_rt_get_ops = {
.type = &nft_rt_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_rt)),
.eval = nft_rt_get_eval,
.init = nft_rt_get_init,
.dump = nft_rt_get_dump,
.validate = nft_rt_validate,
.reduce = NFT_REDUCE_READONLY,
};
struct nft_expr_type nft_rt_type __read_mostly = {
.name = "rt",
.ops = &nft_rt_get_ops,
.policy = nft_rt_policy,
.maxattr = NFTA_RT_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_rt.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/netfilter/ipset/pfxlen.h>
/* Prefixlen maps for fast conversions, by Jan Engelhardt. */
#ifdef E
#undef E
#endif
#define PREFIXES_MAP \
E(0x00000000, 0x00000000, 0x00000000, 0x00000000), \
E(0x80000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xC0000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xE0000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xF0000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xF8000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFC000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFE000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFF000000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFF800000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE), \
E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
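/* PREFIXES_MAP above is expanded twice with different E() definitions:
 * first with htonl() to build ip_set_netmask_map in network byte order,
 * then with a plain __force cast to build ip_set_hostmask_map holding
 * the same masks in host byte order.
 */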
#define E(a, b, c, d) \
{.ip6 = { \
htonl(a), htonl(b), \
htonl(c), htonl(d), \
} }
/* This table works for both IPv4 and IPv6;
 * just index it by prefix length: ip_set_netmask_map[prefixlen].ip
 * (IPv4) or .ip6 (IPv6).
 */
const union nf_inet_addr ip_set_netmask_map[] = {
PREFIXES_MAP
};
EXPORT_SYMBOL_GPL(ip_set_netmask_map);
#undef E
#define E(a, b, c, d) \
{.ip6 = { (__force __be32)a, (__force __be32)b, \
(__force __be32)c, (__force __be32)d, \
} }
/* This table works for both IPv4 and IPv6;
 * just index it by prefix length: ip_set_hostmask_map[prefixlen].ip
 * (IPv4) or .ip6 (IPv6).
 */
const union nf_inet_addr ip_set_hostmask_map[] = {
PREFIXES_MAP
};
EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
/* Find the largest network (smallest prefix length) which starts at
 * 'from' and does not extend beyond 'to'; addresses are in host byte
 * order.
 */
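/* Example: for the range 192.168.0.0 - 192.168.1.255 this returns the
 * last address 192.168.1.255 with *cidr = 23, i.e. the whole range is
 * covered by a single /23 block; callers such as hash_netnet4_uadt()
 * call it repeatedly, continuing from the returned address + 1, until
 * 'to' is reached.
 */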
u32
ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr)
{
u32 last;
u8 i;
for (i = 1; i < 32; i++) {
if ((from & ip_set_hostmask(i)) != from)
continue;
last = from | ~ip_set_hostmask(i);
if (!after(last, to)) {
*cidr = i;
return last;
}
}
*cidr = 32;
return from;
}
EXPORT_SYMBOL_GPL(ip_set_range_to_cidr);
| linux-master | net/netfilter/ipset/pfxlen.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]>
* Copyright (C) 2013 Oliver Smith <[email protected]>
*/
/* Kernel module implementing an IP set type: the hash:net,net type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 Forceadd support added */
/* 2 skbinfo support added */
/* 3 bucketsize, initval support added */
#define IPSET_TYPE_REV_MAX 4 /* bitmask support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <[email protected]>");
IP_SET_MODULE_DESC("hash:net,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,net");
/* Type specific function prefix */
#define HTYPE hash_netnet
#define IP_SET_HASH_WITH_NETS
#define IP_SET_HASH_WITH_NETMASK
#define IP_SET_HASH_WITH_BITMASK
#define IPSET_NET_COUNT 2
/* IPv4 variants */
/* Member elements */
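/* ip[2]/ipcmp and cidr[2]/ccmp are unions so that both addresses and
 * both prefix lengths can each be compared with a single operation in
 * hash_netnet4_data_equal().
 */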
struct hash_netnet4_elem {
union {
__be32 ip[2];
__be64 ipcmp;
};
u8 nomatch;
u8 padding;
union {
u8 cidr[2];
u16 ccmp;
};
};
/* Common functions */
static bool
hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
const struct hash_netnet4_elem *ip2,
u32 *multi)
{
return ip1->ipcmp == ip2->ipcmp &&
ip1->ccmp == ip2->ccmp;
}
static int
hash_netnet4_do_data_match(const struct hash_netnet4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_netnet4_data_set_flags(struct hash_netnet4_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
static void
hash_netnet4_data_reset_flags(struct hash_netnet4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_netnet4_data_reset_elem(struct hash_netnet4_elem *elem,
struct hash_netnet4_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
static void
hash_netnet4_data_netmask(struct hash_netnet4_elem *elem, u8 cidr, bool inner)
{
if (inner) {
elem->ip[1] &= ip_set_netmask(cidr);
elem->cidr[1] = cidr;
} else {
elem->ip[0] &= ip_set_netmask(cidr);
elem->cidr[0] = cidr;
}
}
static bool
hash_netnet4_data_list(struct sk_buff *skb,
const struct hash_netnet4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_netnet4_data_next(struct hash_netnet4_elem *next,
const struct hash_netnet4_elem *d)
{
next->ipcmp = d->ipcmp;
}
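/* Instantiate the generic hash implementation for the IPv4 variant:
 * ip_set_hash_gen.h expands its code based on MTYPE and HOST_MASK.
 */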
#define MTYPE hash_netnet4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static void
hash_netnet4_init(struct hash_netnet4_elem *e)
{
e->cidr[0] = HOST_MASK;
e->cidr[1] = HOST_MASK;
}
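/* Kernel-side add/del/test from the packet path: take both addresses
 * from the skb, mask them with the stored cidrs and the set's bitmask,
 * then hand the element to the variant's adt function.
 */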
static int
hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
if (adt == IPSET_TEST)
e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]);
e.ip[0] &= (ip_set_netmask(e.cidr[0]) & h->bitmask.ip);
e.ip[1] &= (ip_set_netmask(e.cidr[1]) & h->bitmask.ip);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
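/* Userspace add/del/test via netlink: parse the attributes and, when
 * IP_TO/IP2_TO ranges are given for add/del, split both ranges into
 * CIDR blocks with ip_set_range_to_cidr() before calling the variant's
 * adt function.
 */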
static int
hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_netnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0;
u32 ip2 = 0, ip2_from = 0, ip2_to = 0, i = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
hash_netnet4_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_IP2_TO])) {
e.ip[0] = htonl(ip & ntohl(h->bitmask.ip) & ip_set_hostmask(e.cidr[0]));
e.ip[1] = htonl(ip2_from & ntohl(h->bitmask.ip) & ip_set_hostmask(e.cidr[1]));
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip_to < ip)
swap(ip, ip_to);
if (unlikely(ip + UINT_MAX == ip_to))
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
}
ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
if (ret)
return ret;
if (ip2_to < ip2_from)
swap(ip2_from, ip2_to);
if (unlikely(ip2_from + UINT_MAX == ip2_to))
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
}
if (retried) {
ip = ntohl(h->next.ip[0]);
ip2 = ntohl(h->next.ip[1]);
} else {
ip2 = ip2_from;
}
do {
e.ip[0] = htonl(ip);
ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
do {
i++;
e.ip[1] = htonl(ip2);
if (i > IPSET_MAX_RANGE) {
hash_netnet4_data_next(&h->next, &e);
return -ERANGE;
}
ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
} while (ip2++ < ip2_to);
ip2 = ip2_from;
} while (ip++ < ip_to);
return ret;
}
/* IPv6 variants */
struct hash_netnet6_elem {
union nf_inet_addr ip[2];
u8 nomatch;
u8 padding;
union {
u8 cidr[2];
u16 ccmp;
};
};
/* Common functions */
static bool
hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
const struct hash_netnet6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
ip1->ccmp == ip2->ccmp;
}
static int
hash_netnet6_do_data_match(const struct hash_netnet6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_netnet6_data_set_flags(struct hash_netnet6_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
static void
hash_netnet6_data_reset_flags(struct hash_netnet6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_netnet6_data_reset_elem(struct hash_netnet6_elem *elem,
struct hash_netnet6_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
static void
hash_netnet6_data_netmask(struct hash_netnet6_elem *elem, u8 cidr, bool inner)
{
if (inner) {
ip6_netmask(&elem->ip[1], cidr);
elem->cidr[1] = cidr;
} else {
ip6_netmask(&elem->ip[0], cidr);
elem->cidr[0] = cidr;
}
}
static bool
hash_netnet6_data_list(struct sk_buff *skb,
const struct hash_netnet6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_netnet6_data_next(struct hash_netnet6_elem *next,
const struct hash_netnet6_elem *d)
{
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_netnet6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static void
hash_netnet6_init(struct hash_netnet6_elem *e)
{
e->cidr[0] = HOST_MASK;
e->cidr[1] = HOST_MASK;
}
static int
hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netnet6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
if (adt == IPSET_TEST)
e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
ip6_netmask(&e.ip[0], e.cidr[0]);
ip6_netmask(&e.ip[1], e.cidr[1]);
nf_inet_addr_mask_inplace(&e.ip[0], &h->bitmask);
nf_inet_addr_mask_inplace(&e.ip[1], &h->bitmask);
if (e.cidr[0] == HOST_MASK && ipv6_addr_any(&e.ip[0].in6))
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
const struct hash_netnet6 *h = set->data;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
hash_netnet6_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
if (ret)
return ret;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ip6_netmask(&e.ip[0], e.cidr[0]);
ip6_netmask(&e.ip[1], e.cidr[1]);
nf_inet_addr_mask_inplace(&e.ip[0], &h->bitmask);
nf_inet_addr_mask_inplace(&e.ip[1], &h->bitmask);
if (e.cidr[0] == HOST_MASK && ipv6_addr_any(&e.ip[0].in6))
return -IPSET_ERR_HASH_ELEM;
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
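/* The set type descriptor registered with the ipset core; the netlink
 * attribute policies below validate the CREATE and ADD/DEL/TEST
 * requests for this type.
 */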
static struct ip_set_type hash_netnet_type __read_mostly = {
.name = "hash:net,net",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_IP2 | IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
[IPSET_ATTR_BITMASK] = { .type = NLA_NESTED },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_netnet_init(void)
{
return ip_set_type_register(&hash_netnet_type);
}
static void __exit
hash_netnet_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_netnet_type);
}
module_init(hash_netnet_init);
module_exit(hash_netnet_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_netnet.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2000-2002 Joakim Axelsson <[email protected]>
* Patrick Schaaf <[email protected]>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]>
*/
/* Kernel module for IP set management */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>
static LIST_HEAD(ip_set_type_list); /* all registered set types */
static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */
struct ip_set_net {
struct ip_set * __rcu *ip_set_list; /* all individual sets */
ip_set_id_t ip_set_max; /* max number of sets */
bool is_deleted; /* deleted by ip_set_net_exit */
bool is_destroyed; /* all sets are destroyed */
};
static unsigned int ip_set_net_id __read_mostly;
static struct ip_set_net *ip_set_pernet(struct net *net)
{
return net_generic(net, ip_set_net_id);
}
#define IP_SET_INC 64
#define STRNCMP(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
static unsigned int max_sets;
module_param(max_sets, int, 0600);
MODULE_PARM_DESC(max_sets, "maximal number of sets");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex or ip_set_ref_lock is held: */
#define ip_set_dereference(p) \
rcu_dereference_protected(p, \
lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
lockdep_is_held(&ip_set_ref_lock))
#define ip_set(inst, id) \
ip_set_dereference((inst)->ip_set_list)[id]
#define ip_set_ref_netlink(inst, id)	\
rcu_dereference_raw((inst)->ip_set_list)[id]
/* The set types are implemented in modules and registered set types
* can be found in ip_set_type_list. Adding/deleting types is
* serialized by ip_set_type_mutex.
*/
static void
ip_set_type_lock(void)
{
mutex_lock(&ip_set_type_mutex);
}
static void
ip_set_type_unlock(void)
{
mutex_unlock(&ip_set_type_mutex);
}
/* Register and deregister settype */
static struct ip_set_type *
find_set_type(const char *name, u8 family, u8 revision)
{
struct ip_set_type *type;
list_for_each_entry_rcu(type, &ip_set_type_list, list,
lockdep_is_held(&ip_set_type_mutex))
if (STRNCMP(type->name, name) &&
(type->family == family ||
type->family == NFPROTO_UNSPEC) &&
revision >= type->revision_min &&
revision <= type->revision_max)
return type;
return NULL;
}
/* Unlock, try to load a set type module and lock again */
static bool
load_settype(const char *name)
{
nfnl_unlock(NFNL_SUBSYS_IPSET);
pr_debug("try to load ip_set_%s\n", name);
if (request_module("ip_set_%s", name) < 0) {
pr_warn("Can't find ip_set type %s\n", name);
nfnl_lock(NFNL_SUBSYS_IPSET);
return false;
}
nfnl_lock(NFNL_SUBSYS_IPSET);
return true;
}
/* Find a set type and reference it */
#define find_set_type_get(name, family, revision, found) \
__find_set_type_get(name, family, revision, found, false)
static int
__find_set_type_get(const char *name, u8 family, u8 revision,
struct ip_set_type **found, bool retry)
{
struct ip_set_type *type;
int err;
if (retry && !load_settype(name))
return -IPSET_ERR_FIND_TYPE;
rcu_read_lock();
*found = find_set_type(name, family, revision);
if (*found) {
err = !try_module_get((*found)->me) ? -EFAULT : 0;
goto unlock;
}
	/* The type is already loaded, but the requested
	 * revision is not supported
	 */
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STRNCMP(type->name, name)) {
err = -IPSET_ERR_FIND_TYPE;
goto unlock;
}
rcu_read_unlock();
return retry ? -IPSET_ERR_FIND_TYPE :
__find_set_type_get(name, family, revision, found, true);
unlock:
rcu_read_unlock();
return err;
}
/* Find a given set type by name and family.
 * On success, the supported minimum and maximum revisions are
 * filled out.
 */
#define find_set_type_minmax(name, family, min, max) \
__find_set_type_minmax(name, family, min, max, false)
static int
__find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
bool retry)
{
struct ip_set_type *type;
bool found = false;
if (retry && !load_settype(name))
return -IPSET_ERR_FIND_TYPE;
*min = 255; *max = 0;
rcu_read_lock();
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STRNCMP(type->name, name) &&
(type->family == family ||
type->family == NFPROTO_UNSPEC)) {
found = true;
if (type->revision_min < *min)
*min = type->revision_min;
if (type->revision_max > *max)
*max = type->revision_max;
}
rcu_read_unlock();
if (found)
return 0;
return retry ? -IPSET_ERR_FIND_TYPE :
__find_set_type_minmax(name, family, min, max, true);
}
#define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
(f) == NFPROTO_IPV6 ? "inet6" : "any")
/* Register a set type structure. The type is identified by
* the unique triple of name, family and revision.
*/
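/* Typical usage (see ip_set_hash_netnet.c above): hash_netnet_init()
 * calls ip_set_type_register(&hash_netnet_type) from module_init(),
 * and hash_netnet_fini() calls rcu_barrier() followed by
 * ip_set_type_unregister() from module_exit().
 */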
int
ip_set_type_register(struct ip_set_type *type)
{
int ret = 0;
if (type->protocol != IPSET_PROTOCOL) {
pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n",
type->name, family_name(type->family),
type->revision_min, type->revision_max,
type->protocol, IPSET_PROTOCOL);
return -EINVAL;
}
ip_set_type_lock();
if (find_set_type(type->name, type->family, type->revision_min)) {
/* Duplicate! */
pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
type->name, family_name(type->family),
type->revision_min);
ip_set_type_unlock();
return -EINVAL;
}
list_add_rcu(&type->list, &ip_set_type_list);
pr_debug("type %s, family %s, revision %u:%u registered.\n",
type->name, family_name(type->family),
type->revision_min, type->revision_max);
ip_set_type_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(ip_set_type_register);
/* Unregister a set type. There's a small race with ip_set_create */
void
ip_set_type_unregister(struct ip_set_type *type)
{
ip_set_type_lock();
if (!find_set_type(type->name, type->family, type->revision_min)) {
pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
type->name, family_name(type->family),
type->revision_min);
ip_set_type_unlock();
return;
}
list_del_rcu(&type->list);
pr_debug("type %s, family %s with revision min %u unregistered.\n",
type->name, family_name(type->family), type->revision_min);
ip_set_type_unlock();
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ip_set_type_unregister);
/* Utility functions */
void *
ip_set_alloc(size_t size)
{
return kvzalloc(size, GFP_KERNEL_ACCOUNT);
}
EXPORT_SYMBOL_GPL(ip_set_alloc);
void
ip_set_free(void *members)
{
pr_debug("%p: free with %s\n", members,
is_vmalloc_addr(members) ? "vfree" : "kfree");
kvfree(members);
}
EXPORT_SYMBOL_GPL(ip_set_free);
static bool
flag_nested(const struct nlattr *nla)
{
return nla->nla_type & NLA_F_NESTED;
}
static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
[IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 },
[IPSET_ATTR_IPADDR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
};
int
ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
{
struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
if (unlikely(!flag_nested(nla)))
return -IPSET_ERR_PROTOCOL;
if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
ipaddr_policy, NULL))
return -IPSET_ERR_PROTOCOL;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
return -IPSET_ERR_PROTOCOL;
*ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
int
ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
{
struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
if (unlikely(!flag_nested(nla)))
return -IPSET_ERR_PROTOCOL;
if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
ipaddr_policy, NULL))
return -IPSET_ERR_PROTOCOL;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
return -IPSET_ERR_PROTOCOL;
memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
sizeof(struct in6_addr));
return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
static u32
ip_set_timeout_get(const unsigned long *timeout)
{
u32 t;
if (*timeout == IPSET_ELEM_PERMANENT)
return 0;
t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC;
/* Zero value in userspace means no timeout */
return t == 0 ? 1 : t;
}
static char *
ip_set_comment_uget(struct nlattr *tb)
{
return nla_data(tb);
}
/* Called from uadd only, protected by the set spinlock.
* The kadt functions don't use the comment extensions in any way.
*/
void
ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
const struct ip_set_ext *ext)
{
struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
size_t len = ext->comment ? strlen(ext->comment) : 0;
if (unlikely(c)) {
set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
if (!len)
return;
if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
len = IPSET_MAX_COMMENT_SIZE;
c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
if (unlikely(!c))
return;
strscpy(c->str, ext->comment, len + 1);
set->ext_size += sizeof(*c) + strlen(c->str) + 1;
rcu_assign_pointer(comment->c, c);
}
EXPORT_SYMBOL_GPL(ip_set_init_comment);
/* Used only when dumping a set, protected by rcu_read_lock() */
static int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;
return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
}
/* Called from uadd/udel, flush or the garbage collectors, protected
 * by the set spinlock.
* Called when the set is destroyed and when there can't be any user
* of the set data anymore.
*/
static void
ip_set_comment_free(struct ip_set *set, void *ptr)
{
struct ip_set_comment *comment = ptr;
struct ip_set_comment_rcu *c;
c = rcu_dereference_protected(comment->c, 1);
if (unlikely(!c))
return;
set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
typedef void (*destroyer)(struct ip_set *, void *);
/* ipset data extension types, in size order */
const struct ip_set_ext_type ip_set_extensions[] = {
[IPSET_EXT_ID_COUNTER] = {
.type = IPSET_EXT_COUNTER,
.flag = IPSET_FLAG_WITH_COUNTERS,
.len = sizeof(struct ip_set_counter),
.align = __alignof__(struct ip_set_counter),
},
[IPSET_EXT_ID_TIMEOUT] = {
.type = IPSET_EXT_TIMEOUT,
.len = sizeof(unsigned long),
.align = __alignof__(unsigned long),
},
[IPSET_EXT_ID_SKBINFO] = {
.type = IPSET_EXT_SKBINFO,
.flag = IPSET_FLAG_WITH_SKBINFO,
.len = sizeof(struct ip_set_skbinfo),
.align = __alignof__(struct ip_set_skbinfo),
},
[IPSET_EXT_ID_COMMENT] = {
.type = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
.flag = IPSET_FLAG_WITH_COMMENT,
.len = sizeof(struct ip_set_comment),
.align = __alignof__(struct ip_set_comment),
.destroy = ip_set_comment_free,
},
};
EXPORT_SYMBOL_GPL(ip_set_extensions);
static bool
add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
{
return ip_set_extensions[id].flag ?
(flags & ip_set_extensions[id].flag) :
!!tb[IPSET_ATTR_TIMEOUT];
}
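/* Compute the full element length of a set: walk the extension table,
 * record the offset of each requested extension in set->offset[], mark
 * it in set->extensions and return the total length, properly aligned.
 */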
size_t
ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
size_t align)
{
enum ip_set_ext_id id;
u32 cadt_flags = 0;
if (tb[IPSET_ATTR_CADT_FLAGS])
cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
set->flags |= IPSET_CREATE_FLAG_FORCEADD;
if (!align)
align = 1;
for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
if (!add_extension(id, cadt_flags, tb))
continue;
if (align < ip_set_extensions[id].align)
align = ip_set_extensions[id].align;
len = ALIGN(len, ip_set_extensions[id].align);
set->offset[id] = len;
set->extensions |= ip_set_extensions[id].type;
len += ip_set_extensions[id].len;
}
return ALIGN(len, align);
}
EXPORT_SYMBOL_GPL(ip_set_elem_len);
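/* Parse the per-element extension attributes (timeout, counters,
 * comment, skbinfo) of a netlink request into *ext, rejecting
 * extensions the set was not created with.
 */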
int
ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext)
{
u64 fullmark;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!SET_WITH_TIMEOUT(set))
return -IPSET_ERR_TIMEOUT;
ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
if (!SET_WITH_COUNTER(set))
return -IPSET_ERR_COUNTER;
if (tb[IPSET_ATTR_BYTES])
ext->bytes = be64_to_cpu(nla_get_be64(
tb[IPSET_ATTR_BYTES]));
if (tb[IPSET_ATTR_PACKETS])
ext->packets = be64_to_cpu(nla_get_be64(
tb[IPSET_ATTR_PACKETS]));
}
if (tb[IPSET_ATTR_COMMENT]) {
if (!SET_WITH_COMMENT(set))
return -IPSET_ERR_COMMENT;
ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
}
if (tb[IPSET_ATTR_SKBMARK]) {
if (!SET_WITH_SKBINFO(set))
return -IPSET_ERR_SKBINFO;
fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
ext->skbinfo.skbmark = fullmark >> 32;
ext->skbinfo.skbmarkmask = fullmark & 0xffffffff;
}
if (tb[IPSET_ATTR_SKBPRIO]) {
if (!SET_WITH_SKBINFO(set))
return -IPSET_ERR_SKBINFO;
ext->skbinfo.skbprio =
be32_to_cpu(nla_get_be32(tb[IPSET_ATTR_SKBPRIO]));
}
if (tb[IPSET_ATTR_SKBQUEUE]) {
if (!SET_WITH_SKBINFO(set))
return -IPSET_ERR_SKBINFO;
ext->skbinfo.skbqueue =
be16_to_cpu(nla_get_be16(tb[IPSET_ATTR_SKBQUEUE]));
}
return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_extensions);
static u64
ip_set_get_bytes(const struct ip_set_counter *counter)
{
return (u64)atomic64_read(&(counter)->bytes);
}
static u64
ip_set_get_packets(const struct ip_set_counter *counter)
{
return (u64)atomic64_read(&(counter)->packets);
}
static bool
ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
{
return nla_put_net64(skb, IPSET_ATTR_BYTES,
cpu_to_be64(ip_set_get_bytes(counter)),
IPSET_ATTR_PAD) ||
nla_put_net64(skb, IPSET_ATTR_PACKETS,
cpu_to_be64(ip_set_get_packets(counter)),
IPSET_ATTR_PAD);
}
static bool
ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
{
/* Send nonzero parameters only */
return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
nla_put_net64(skb, IPSET_ATTR_SKBMARK,
cpu_to_be64((u64)skbinfo->skbmark << 32 |
skbinfo->skbmarkmask),
IPSET_ATTR_PAD)) ||
(skbinfo->skbprio &&
nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
cpu_to_be32(skbinfo->skbprio))) ||
(skbinfo->skbqueue &&
nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
cpu_to_be16(skbinfo->skbqueue)));
}
int
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
const void *e, bool active)
{
if (SET_WITH_TIMEOUT(set)) {
unsigned long *timeout = ext_timeout(e, set);
if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(active ? ip_set_timeout_get(timeout)
: *timeout)))
return -EMSGSIZE;
}
if (SET_WITH_COUNTER(set) &&
ip_set_put_counter(skb, ext_counter(e, set)))
return -EMSGSIZE;
if (SET_WITH_COMMENT(set) &&
ip_set_put_comment(skb, ext_comment(e, set)))
return -EMSGSIZE;
if (SET_WITH_SKBINFO(set) &&
ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
return -EMSGSIZE;
return 0;
}
EXPORT_SYMBOL_GPL(ip_set_put_extensions);
static bool
ip_set_match_counter(u64 counter, u64 match, u8 op)
{
switch (op) {
case IPSET_COUNTER_NONE:
return true;
case IPSET_COUNTER_EQ:
return counter == match;
case IPSET_COUNTER_NE:
return counter != match;
case IPSET_COUNTER_LT:
return counter < match;
case IPSET_COUNTER_GT:
return counter > match;
}
return false;
}
static void
ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
{
atomic64_add((long long)bytes, &(counter)->bytes);
}
static void
ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
{
atomic64_add((long long)packets, &(counter)->packets);
}
static void
ip_set_update_counter(struct ip_set_counter *counter,
const struct ip_set_ext *ext, u32 flags)
{
if (ext->packets != ULLONG_MAX &&
!(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
ip_set_add_bytes(ext->bytes, counter);
ip_set_add_packets(ext->packets, counter);
}
}
static void
ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
mext->skbinfo = *skbinfo;
}
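/* Check the extension parts of a matching element in the packet path:
 * reject expired entries, update and optionally match the counters,
 * and copy out the skbinfo data when present.
 */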
bool
ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags, void *data)
{
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, set)))
return false;
if (SET_WITH_COUNTER(set)) {
struct ip_set_counter *counter = ext_counter(data, set);
ip_set_update_counter(counter, ext, flags);
if (flags & IPSET_FLAG_MATCH_COUNTERS &&
!(ip_set_match_counter(ip_set_get_packets(counter),
mext->packets, mext->packets_op) &&
ip_set_match_counter(ip_set_get_bytes(counter),
mext->bytes, mext->bytes_op)))
return false;
}
if (SET_WITH_SKBINFO(set))
ip_set_get_skbinfo(ext_skbinfo(data, set),
ext, mext, flags);
return true;
}
EXPORT_SYMBOL_GPL(ip_set_match_extensions);
/* Creating/destroying/renaming/swapping affect the existence and
* the properties of a set. All of these can be executed from userspace
* only and serialized by the nfnl mutex indirectly from nfnetlink.
*
* Sets are identified by their index in ip_set_list and the index
* is used by the external references (set/SET netfilter modules).
*
* The set behind an index may change by swapping only, from userspace.
*/
static void
__ip_set_get(struct ip_set *set)
{
write_lock_bh(&ip_set_ref_lock);
set->ref++;
write_unlock_bh(&ip_set_ref_lock);
}
static void
__ip_set_put(struct ip_set *set)
{
write_lock_bh(&ip_set_ref_lock);
BUG_ON(set->ref == 0);
set->ref--;
write_unlock_bh(&ip_set_ref_lock);
}
/* set->ref can be swapped out by ip_set_swap; netlink events (like dump)
 * need a separate reference counter.
 */
static void
__ip_set_get_netlink(struct ip_set *set)
{
write_lock_bh(&ip_set_ref_lock);
set->ref_netlink++;
write_unlock_bh(&ip_set_ref_lock);
}
static void
__ip_set_put_netlink(struct ip_set *set)
{
write_lock_bh(&ip_set_ref_lock);
BUG_ON(set->ref_netlink == 0);
set->ref_netlink--;
write_unlock_bh(&ip_set_ref_lock);
}
/* Add, del and test set entries from kernel.
*
* The set behind the index must exist and must be referenced
 * so it can't be destroyed (or changed) under our feet.
*/
static struct ip_set *
ip_set_rcu_get(struct net *net, ip_set_id_t index)
{
struct ip_set *set;
struct ip_set_net *inst = ip_set_pernet(net);
rcu_read_lock();
/* ip_set_list itself needs to be protected */
set = rcu_dereference(inst->ip_set_list)[index];
rcu_read_unlock();
return set;
}
static inline void
ip_set_lock(struct ip_set *set)
{
if (!set->variant->region_lock)
spin_lock_bh(&set->lock);
}
static inline void
ip_set_unlock(struct ip_set *set)
{
if (!set->variant->region_lock)
spin_unlock_bh(&set->lock);
}
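/* Test a packet against a set. A -EAGAIN return from the set type means
 * the element should be completed, so an ADD is triggered instead; with
 * --return-nomatch the result is inverted for nomatch flagged elements.
 * Error codes are converted to "no match".
 */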
int
ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
int ret = 0;
BUG_ON(!set);
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
if (ret == -EAGAIN) {
/* Type requests element to be completed */
pr_debug("element must be completed, ADD is triggered\n");
ip_set_lock(set);
set->variant->kadt(set, skb, par, IPSET_ADD, opt);
ip_set_unlock(set);
ret = 1;
} else {
/* --return-nomatch: invert matched element */
if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) &&
(set->type->features & IPSET_TYPE_NOMATCH) &&
(ret > 0 || ret == -ENOTEMPTY))
ret = -ret;
}
/* Convert error codes to nomatch */
return (ret < 0 ? 0 : ret);
}
EXPORT_SYMBOL_GPL(ip_set_test);
int
ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
int ret;
BUG_ON(!set);
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return -IPSET_ERR_TYPE_MISMATCH;
ip_set_lock(set);
ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
ip_set_unlock(set);
return ret;
}
EXPORT_SYMBOL_GPL(ip_set_add);
int
ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
int ret = 0;
BUG_ON(!set);
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return -IPSET_ERR_TYPE_MISMATCH;
ip_set_lock(set);
ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
ip_set_unlock(set);
return ret;
}
EXPORT_SYMBOL_GPL(ip_set_del);
/* Find a set by name and reference it once. The reference makes sure
 * the thing pointed to does not go away under our feet.
 */
ip_set_id_t
ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
{
ip_set_id_t i, index = IPSET_INVALID_ID;
struct ip_set *s;
struct ip_set_net *inst = ip_set_pernet(net);
rcu_read_lock();
for (i = 0; i < inst->ip_set_max; i++) {
s = rcu_dereference(inst->ip_set_list)[i];
if (s && STRNCMP(s->name, name)) {
__ip_set_get(s);
index = i;
*set = s;
break;
}
}
rcu_read_unlock();
return index;
}
EXPORT_SYMBOL_GPL(ip_set_get_byname);
/* If the given set pointer points to a valid set, decrement the
 * reference count by one. The caller must not assume the index
 * is still valid after calling this function.
 */
static void
__ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
{
struct ip_set *set;
rcu_read_lock();
set = rcu_dereference(inst->ip_set_list)[index];
if (set)
__ip_set_put(set);
rcu_read_unlock();
}
void
ip_set_put_byindex(struct net *net, ip_set_id_t index)
{
struct ip_set_net *inst = ip_set_pernet(net);
__ip_set_put_byindex(inst, index);
}
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
/* Get the name of a set behind a set index.
* Set itself is protected by RCU, but its name isn't: to protect against
* renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
* name.
*/
void
ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
{
struct ip_set *set = ip_set_rcu_get(net, index);
BUG_ON(!set);
read_lock_bh(&ip_set_ref_lock);
strscpy_pad(name, set->name, IPSET_MAXNAMELEN);
read_unlock_bh(&ip_set_ref_lock);
}
EXPORT_SYMBOL_GPL(ip_set_name_byindex);
/* Routines to call by external subsystems, which do not
* call nfnl_lock for us.
*/
/* Find a set by index and reference it once. The reference makes sure
 * the thing pointed to does not go away under our feet.
 *
 * The nfnl mutex is used in the function.
 */
ip_set_id_t
ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
{
struct ip_set *set;
struct ip_set_net *inst = ip_set_pernet(net);
if (index >= inst->ip_set_max)
return IPSET_INVALID_ID;
nfnl_lock(NFNL_SUBSYS_IPSET);
set = ip_set(inst, index);
if (set)
__ip_set_get(set);
else
index = IPSET_INVALID_ID;
nfnl_unlock(NFNL_SUBSYS_IPSET);
return index;
}
EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
/* If the given set pointer points to a valid set, decrement the
 * reference count by one. The caller must not assume the index
 * is still valid after calling this function.
 *
 * The nfnl mutex is used in the function.
 */
void
ip_set_nfnl_put(struct net *net, ip_set_id_t index)
{
struct ip_set *set;
struct ip_set_net *inst = ip_set_pernet(net);
nfnl_lock(NFNL_SUBSYS_IPSET);
if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
set = ip_set(inst, index);
if (set)
__ip_set_put(set);
}
nfnl_unlock(NFNL_SUBSYS_IPSET);
}
EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
/* Communication protocol with userspace over netlink.
*
* The commands are serialized by the nfnl mutex.
*/
static inline u8 protocol(const struct nlattr * const tb[])
{
return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]);
}
static inline bool
protocol_failed(const struct nlattr * const tb[])
{
return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL;
}
static inline bool
protocol_min_failed(const struct nlattr * const tb[])
{
return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN;
}
static inline u32
flag_exist(const struct nlmsghdr *nlh)
{
return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
}
static struct nlmsghdr *
start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
enum ipset_cmd cmd)
{
return nfnl_msg_put(skb, portid, seq,
nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd), flags,
NFPROTO_IPV4, NFNETLINK_V0, 0);
}
/* Create a set */
static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
[IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1 },
[IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1},
[IPSET_ATTR_REVISION] = { .type = NLA_U8 },
[IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
[IPSET_ATTR_DATA] = { .type = NLA_NESTED },
};
static struct ip_set *
find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
{
struct ip_set *set = NULL;
ip_set_id_t i;
*id = IPSET_INVALID_ID;
for (i = 0; i < inst->ip_set_max; i++) {
set = ip_set(inst, i);
if (set && STRNCMP(set->name, name)) {
*id = i;
break;
}
}
return (*id == IPSET_INVALID_ID ? NULL : set);
}
static inline struct ip_set *
find_set(struct ip_set_net *inst, const char *name)
{
ip_set_id_t id;
return find_set_and_id(inst, name, &id);
}
static int
find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
struct ip_set **set)
{
struct ip_set *s;
ip_set_id_t i;
*index = IPSET_INVALID_ID;
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
if (!s) {
if (*index == IPSET_INVALID_ID)
*index = i;
} else if (STRNCMP(name, s->name)) {
/* Name clash */
*set = s;
return -EEXIST;
}
}
if (*index == IPSET_INVALID_ID)
/* No free slot remained */
return -IPSET_ERR_MAX_SETS;
return 0;
}
static int ip_set_none(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
return -EOPNOTSUPP;
}
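/* CREATE command: allocate the base set, take a reference on the set
 * type, parse the type specific DATA attributes and call type->create()
 * without holding any locks, then, under the nfnl mutex, find a free
 * slot in ip_set_list (growing the array by IP_SET_INC if necessary)
 * and publish the new set.
 */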
static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
struct ip_set *set, *clash = NULL;
ip_set_id_t index = IPSET_INVALID_ID;
struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {};
const char *name, *typename;
u8 family, revision;
u32 flags = flag_exist(info->nlh);
int ret = 0;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!attr[IPSET_ATTR_TYPENAME] ||
!attr[IPSET_ATTR_REVISION] ||
!attr[IPSET_ATTR_FAMILY] ||
(attr[IPSET_ATTR_DATA] &&
!flag_nested(attr[IPSET_ATTR_DATA]))))
return -IPSET_ERR_PROTOCOL;
name = nla_data(attr[IPSET_ATTR_SETNAME]);
typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
name, typename, family_name(family), revision);
/* First, and without any locks, allocate and initialize
* a normal base set structure.
*/
set = kzalloc(sizeof(*set), GFP_KERNEL);
if (!set)
return -ENOMEM;
spin_lock_init(&set->lock);
strscpy(set->name, name, IPSET_MAXNAMELEN);
set->family = family;
set->revision = revision;
/* Next, check that we know the type, and take
* a reference on the type, to make sure it stays available
* while constructing our new set.
*
* After referencing the type, we try to create the type
* specific part of the set without holding any locks.
*/
ret = find_set_type_get(typename, family, revision, &set->type);
if (ret)
goto out;
/* Without holding any locks, create private part. */
if (attr[IPSET_ATTR_DATA] &&
nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
set->type->create_policy, NULL)) {
ret = -IPSET_ERR_PROTOCOL;
goto put_out;
}
/* Set create flags depending on the type revision */
set->flags |= set->type->create_flags[revision];
ret = set->type->create(info->net, set, tb, flags);
if (ret != 0)
goto put_out;
/* BTW, ret==0 here. */
/* Here, we have a valid, constructed set and we are protected
* by the nfnl mutex. Find the first free index in ip_set_list
* and check clashing.
*/
ret = find_free_id(inst, set->name, &index, &clash);
if (ret == -EEXIST) {
/* If this is the same set and requested, ignore error */
if ((flags & IPSET_FLAG_EXIST) &&
STRNCMP(set->type->name, clash->type->name) &&
set->type->family == clash->type->family &&
set->type->revision_min == clash->type->revision_min &&
set->type->revision_max == clash->type->revision_max &&
set->variant->same_set(set, clash))
ret = 0;
goto cleanup;
} else if (ret == -IPSET_ERR_MAX_SETS) {
struct ip_set **list, **tmp;
ip_set_id_t i = inst->ip_set_max + IP_SET_INC;
if (i < inst->ip_set_max || i == IPSET_INVALID_ID)
/* Wraparound */
goto cleanup;
list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
tmp = ip_set_dereference(inst->ip_set_list);
memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
rcu_assign_pointer(inst->ip_set_list, list);
/* Make sure all current packets have passed through */
synchronize_net();
/* Use new list */
index = inst->ip_set_max;
inst->ip_set_max = i;
kvfree(tmp);
ret = 0;
} else if (ret) {
goto cleanup;
}
/* Finally! Add our shiny new set to the list, and be done. */
pr_debug("create: '%s' created with index %u!\n", set->name, index);
ip_set(inst, index) = set;
return ret;
cleanup:
set->variant->destroy(set);
put_out:
module_put(set->type->me);
out:
kfree(set);
return ret;
}
/* Destroy sets */
static const struct nla_policy
ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
[IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1 },
};
static void
ip_set_destroy_set(struct ip_set *set)
{
pr_debug("set: %s\n", set->name);
/* Must call it without holding any lock */
set->variant->destroy(set);
module_put(set->type->me);
kfree(set);
}
static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
struct ip_set *s;
ip_set_id_t i;
int ret = 0;
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
/* Must wait for flush to be really finished in list:set */
rcu_barrier();
/* Commands are serialized and references are
* protected by the ip_set_ref_lock.
	 * External systems (e.g. xt_set) must call the
	 * ip_set_nfnl_get_byindex()/ip_set_nfnl_put() functions; that way
	 * we can safely check references here.
*
* list:set timer can only decrement the reference
* counter, so if it's already zero, we can proceed
* without holding the lock.
*/
read_lock_bh(&ip_set_ref_lock);
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
if (s && (s->ref || s->ref_netlink)) {
ret = -IPSET_ERR_BUSY;
goto out;
}
}
inst->is_destroyed = true;
read_unlock_bh(&ip_set_ref_lock);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
if (s) {
ip_set(inst, i) = NULL;
ip_set_destroy_set(s);
}
}
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
u32 flags = flag_exist(info->nlh);
s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
&i);
if (!s) {
if (!(flags & IPSET_FLAG_EXIST))
ret = -ENOENT;
goto out;
} else if (s->ref || s->ref_netlink) {
ret = -IPSET_ERR_BUSY;
goto out;
}
ip_set(inst, i) = NULL;
read_unlock_bh(&ip_set_ref_lock);
ip_set_destroy_set(s);
}
return 0;
out:
read_unlock_bh(&ip_set_ref_lock);
return ret;
}
/* Flush sets */
static void
ip_set_flush_set(struct ip_set *set)
{
pr_debug("set: %s\n", set->name);
ip_set_lock(set);
set->variant->flush(set);
ip_set_unlock(set);
}
static int ip_set_flush(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
struct ip_set *s;
ip_set_id_t i;
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
if (s)
ip_set_flush_set(s);
}
} else {
s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (!s)
return -ENOENT;
ip_set_flush_set(s);
}
return 0;
}
/* Rename a set */
static const struct nla_policy
ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
[IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1 },
[IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1 },
};
static int ip_set_rename(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
struct ip_set *set, *s;
const char *name2;
ip_set_id_t i;
int ret = 0;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!attr[IPSET_ATTR_SETNAME2]))
return -IPSET_ERR_PROTOCOL;
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (!set)
return -ENOENT;
write_lock_bh(&ip_set_ref_lock);
if (set->ref != 0 || set->ref_netlink != 0) {
ret = -IPSET_ERR_REFERENCED;
goto out;
}
name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
if (s && STRNCMP(s->name, name2)) {
ret = -IPSET_ERR_EXIST_SETNAME2;
goto out;
}
}
strscpy_pad(set->name, name2, IPSET_MAXNAMELEN);
out:
write_unlock_bh(&ip_set_ref_lock);
return ret;
}
/* Swap two sets so that name/index points to the other.
* References and set names are also swapped.
*
* The commands are serialized by the nfnl mutex and references are
* protected by the ip_set_ref_lock. The kernel interfaces
* do not hold the mutex but the pointer settings are atomic
* so the ip_set_list always contains valid pointers to the sets.
*/
static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
struct ip_set *from, *to;
ip_set_id_t from_id, to_id;
char from_name[IPSET_MAXNAMELEN];
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!attr[IPSET_ATTR_SETNAME2]))
return -IPSET_ERR_PROTOCOL;
from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
&from_id);
if (!from)
return -ENOENT;
to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
&to_id);
if (!to)
return -IPSET_ERR_EXIST_SETNAME2;
	/* Features must not change.
	 * Not an artificial restriction anymore, as we must prevent
	 * possible loops created by swapping in list:set type of sets.
	 */
if (!(from->type->features == to->type->features &&
from->family == to->family))
return -IPSET_ERR_TYPE_MISMATCH;
write_lock_bh(&ip_set_ref_lock);
if (from->ref_netlink || to->ref_netlink) {
write_unlock_bh(&ip_set_ref_lock);
return -EBUSY;
}
strscpy_pad(from_name, from->name, IPSET_MAXNAMELEN);
strscpy_pad(from->name, to->name, IPSET_MAXNAMELEN);
strscpy_pad(to->name, from_name, IPSET_MAXNAMELEN);
swap(from->ref, to->ref);
ip_set(inst, from_id) = to;
ip_set(inst, to_id) = from;
write_unlock_bh(&ip_set_ref_lock);
return 0;
}
/* List/save set data */
#define DUMP_INIT 0
#define DUMP_ALL 1
#define DUMP_ONE 2
#define DUMP_LAST 3
#define DUMP_TYPE(arg) (((u32)(arg)) & 0x0000FFFF)
#define DUMP_FLAGS(arg) (((u32)(arg)) >> 16)
int
ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
{
u32 cadt_flags = 0;
if (SET_WITH_TIMEOUT(set))
if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(set->timeout))))
return -EMSGSIZE;
if (SET_WITH_COUNTER(set))
cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
if (SET_WITH_COMMENT(set))
cadt_flags |= IPSET_FLAG_WITH_COMMENT;
if (SET_WITH_SKBINFO(set))
cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
if (SET_WITH_FORCEADD(set))
cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
if (!cadt_flags)
return 0;
return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
}
EXPORT_SYMBOL_GPL(ip_set_put_flags);
static int
ip_set_dump_done(struct netlink_callback *cb)
{
if (cb->args[IPSET_CB_ARG0]) {
struct ip_set_net *inst =
(struct ip_set_net *)cb->args[IPSET_CB_NET];
ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
struct ip_set *set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
pr_debug("release set %s\n", set->name);
__ip_set_put_netlink(set);
}
return 0;
}
static inline void
dump_attrs(struct nlmsghdr *nlh)
{
const struct nlattr *attr;
int rem;
pr_debug("dump nlmsg\n");
nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
}
}
static const struct nla_policy
ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
[IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1 },
[IPSET_ATTR_FLAGS] = { .type = NLA_U32 },
};
static int
ip_set_dump_start(struct netlink_callback *cb)
{
struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
struct nlattr *attr = (void *)nlh + min_len;
struct sk_buff *skb = cb->skb;
struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
u32 dump_type;
int ret;
ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
nlh->nlmsg_len - min_len,
ip_set_dump_policy, NULL);
if (ret)
goto error;
cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
if (cda[IPSET_ATTR_SETNAME]) {
ip_set_id_t index;
struct ip_set *set;
set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
&index);
if (!set) {
ret = -ENOENT;
goto error;
}
dump_type = DUMP_ONE;
cb->args[IPSET_CB_INDEX] = index;
} else {
dump_type = DUMP_ALL;
}
if (cda[IPSET_ATTR_FLAGS]) {
u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
dump_type |= (f << 16);
}
cb->args[IPSET_CB_NET] = (unsigned long)inst;
cb->args[IPSET_CB_DUMP] = dump_type;
return 0;
error:
/* We have to create and send the error message manually :-( */
if (nlh->nlmsg_flags & NLM_F_ACK) {
netlink_ack(cb->skb, nlh, ret, NULL);
}
return ret;
}
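/* Dump worker: walks ip_set_list using the cb->args[] slots as state.
 * When dumping all sets the list is traversed twice (DUMP_ALL, then
 * DUMP_LAST) so that sets with the IPSET_DUMP_LAST feature (list:set)
 * are dumped last; a netlink reference pins each set while its entries
 * are being listed.
 */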
static int
ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
{
ip_set_id_t index = IPSET_INVALID_ID, max;
struct ip_set *set = NULL;
struct nlmsghdr *nlh = NULL;
unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
u32 dump_type, dump_flags;
bool is_destroyed;
int ret = 0;
if (!cb->args[IPSET_CB_DUMP])
return -EINVAL;
if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
goto out;
dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]);
dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]);
max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1
: inst->ip_set_max;
dump_last:
pr_debug("dump type, flag: %u %u index: %ld\n",
dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
write_lock_bh(&ip_set_ref_lock);
set = ip_set(inst, index);
is_destroyed = inst->is_destroyed;
if (!set || is_destroyed) {
write_unlock_bh(&ip_set_ref_lock);
if (dump_type == DUMP_ONE) {
ret = -ENOENT;
goto out;
}
if (is_destroyed) {
/* All sets are just being destroyed */
ret = 0;
goto out;
}
continue;
}
/* When dumping all sets, we must dump "sorted"
* so that lists (unions of sets) are dumped last.
*/
if (dump_type != DUMP_ONE &&
((dump_type == DUMP_ALL) ==
!!(set->type->features & IPSET_DUMP_LAST))) {
write_unlock_bh(&ip_set_ref_lock);
continue;
}
pr_debug("List set: %s\n", set->name);
if (!cb->args[IPSET_CB_ARG0]) {
/* Start listing: make sure set won't be destroyed */
pr_debug("reference set\n");
set->ref_netlink++;
}
write_unlock_bh(&ip_set_ref_lock);
nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, flags,
IPSET_CMD_LIST);
if (!nlh) {
ret = -EMSGSIZE;
goto release_refcount;
}
if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL,
cb->args[IPSET_CB_PROTO]) ||
nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
goto nla_put_failure;
if (dump_flags & IPSET_FLAG_LIST_SETNAME)
goto next_set;
switch (cb->args[IPSET_CB_ARG0]) {
case 0:
/* Core header data */
if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
set->type->name) ||
nla_put_u8(skb, IPSET_ATTR_FAMILY,
set->family) ||
nla_put_u8(skb, IPSET_ATTR_REVISION,
set->revision))
goto nla_put_failure;
if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN &&
nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index)))
goto nla_put_failure;
ret = set->variant->head(set, skb);
if (ret < 0)
goto release_refcount;
if (dump_flags & IPSET_FLAG_LIST_HEADER)
goto next_set;
if (set->variant->uref)
set->variant->uref(set, cb, true);
fallthrough;
default:
ret = set->variant->list(set, skb, cb);
if (!cb->args[IPSET_CB_ARG0])
/* Set is done, proceed with next one */
goto next_set;
goto release_refcount;
}
}
/* If we dump all sets, continue with dumping last ones */
if (dump_type == DUMP_ALL) {
dump_type = DUMP_LAST;
cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
cb->args[IPSET_CB_INDEX] = 0;
if (set && set->variant->uref)
set->variant->uref(set, cb, false);
goto dump_last;
}
goto out;
nla_put_failure:
ret = -EFAULT;
next_set:
if (dump_type == DUMP_ONE)
cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID;
else
cb->args[IPSET_CB_INDEX]++;
release_refcount:
/* If there was an error or set is done, release set */
if (ret || !cb->args[IPSET_CB_ARG0]) {
set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
pr_debug("release set %s\n", set->name);
__ip_set_put_netlink(set);
cb->args[IPSET_CB_ARG0] = 0;
}
out:
if (nlh) {
nlmsg_end(skb, nlh);
pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
dump_attrs(nlh);
}
return ret < 0 ? ret : skb->len;
}
static int ip_set_dump(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
{
struct netlink_dump_control c = {
.start = ip_set_dump_start,
.dump = ip_set_dump_do,
.done = ip_set_dump_done,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
}
/* Add, del and test */
static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
[IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_DATA] = { .type = NLA_NESTED },
[IPSET_ATTR_ADT] = { .type = NLA_NESTED },
};
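/* Run an add/del request against a set: take the set lock, call the
* type-specific uadt handler and retry transparently when the handler
* asks for it: -EAGAIN triggers a resize of the backend and a retry once
* the resize succeeds, -ERANGE simply retries so that a partially
* processed range can be resumed. Between retries the nfnl mutex is
* dropped briefly, with the set pinned by a netlink reference. In
* restore/batch mode a failure is reported by sending back a hand-built
* NLMSG_ERROR carrying the failing line number, and -EINTR is returned
* to suppress the automatic nfnetlink ACK.
*/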
static int
call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt,
u32 flags, bool use_lineno)
{
int ret;
u32 lineno = 0;
bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
do {
if (retried) {
__ip_set_get_netlink(set);
nfnl_unlock(NFNL_SUBSYS_IPSET);
cond_resched();
nfnl_lock(NFNL_SUBSYS_IPSET);
__ip_set_put_netlink(set);
}
ip_set_lock(set);
ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
ip_set_unlock(set);
retried = true;
} while (ret == -ERANGE ||
(ret == -EAGAIN &&
set->variant->resize &&
(ret = set->variant->resize(set, retried)) == 0));
if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
return 0;
if (lineno && use_lineno) {
/* Error in restore/batch mode: send back lineno */
struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
struct sk_buff *skb2;
struct nlmsgerr *errmsg;
size_t payload = min(SIZE_MAX,
sizeof(*errmsg) + nlmsg_len(nlh));
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
struct nlattr *cmdattr;
u32 *errline;
skb2 = nlmsg_new(payload, GFP_KERNEL);
if (!skb2)
return -ENOMEM;
rep = nlmsg_put(skb2, NETLINK_CB(skb).portid,
nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
errmsg = nlmsg_data(rep);
errmsg->error = ret;
unsafe_memcpy(&errmsg->msg, nlh, nlh->nlmsg_len,
/* Bounds checked by the skb layer. */);
cmdattr = (void *)&errmsg->msg + min_len;
ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr,
nlh->nlmsg_len - min_len, ip_set_adt_policy,
NULL);
if (ret) {
nlmsg_free(skb2);
return ret;
}
errline = nla_data(cda[IPSET_ATTR_LINENO]);
*errline = lineno;
nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
/* Signal netlink not to send its ACK/errmsg. */
return -EINTR;
}
return ret;
}
static int ip_set_ad(struct net *net, struct sock *ctnl,
struct sk_buff *skb,
enum ipset_adt adt,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[],
struct netlink_ext_ack *extack)
{
struct ip_set_net *inst = ip_set_pernet(net);
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
const struct nlattr *nla;
u32 flags = flag_exist(nlh);
bool use_lineno;
int ret = 0;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!((attr[IPSET_ATTR_DATA] != NULL) ^
(attr[IPSET_ATTR_ADT] != NULL)) ||
(attr[IPSET_ATTR_DATA] &&
!flag_nested(attr[IPSET_ATTR_DATA])) ||
(attr[IPSET_ATTR_ADT] &&
(!flag_nested(attr[IPSET_ATTR_ADT]) ||
!attr[IPSET_ATTR_LINENO]))))
return -IPSET_ERR_PROTOCOL;
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (!set)
return -ENOENT;
use_lineno = !!attr[IPSET_ATTR_LINENO];
if (attr[IPSET_ATTR_DATA]) {
if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
attr[IPSET_ATTR_DATA],
set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
ret = call_ad(net, ctnl, skb, set, tb, adt, flags,
use_lineno);
} else {
int nla_rem;
nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
if (nla_type(nla) != IPSET_ATTR_DATA ||
!flag_nested(nla) ||
nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
ret = call_ad(net, ctnl, skb, set, tb, adt,
flags, use_lineno);
if (ret < 0)
return ret;
}
}
return ret;
}
static int ip_set_uadd(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
return ip_set_ad(info->net, info->sk, skb,
IPSET_ADD, info->nlh, attr, info->extack);
}
static int ip_set_udel(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
return ip_set_ad(info->net, info->sk, skb,
IPSET_DEL, info->nlh, attr, info->extack);
}
static int ip_set_utest(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
int ret = 0;
u32 lineno;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!attr[IPSET_ATTR_DATA] ||
!flag_nested(attr[IPSET_ATTR_DATA])))
return -IPSET_ERR_PROTOCOL;
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (!set)
return -ENOENT;
if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
rcu_read_lock_bh();
ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
rcu_read_unlock_bh();
/* Userspace can't trigger element to be re-added */
if (ret == -EAGAIN)
ret = 1;
return ret > 0 ? 0 : -IPSET_ERR_EXIST;
}
/* Get the header data of a set */
static int ip_set_header(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
const struct ip_set *set;
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME]))
return -IPSET_ERR_PROTOCOL;
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (!set)
return -ENOENT;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb2)
return -ENOMEM;
nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0,
IPSET_CMD_HEADER);
if (!nlh2)
goto nlmsg_failure;
if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
nlmsg_failure:
kfree_skb(skb2);
return -EMSGSIZE;
}
/* Get type data */
static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
[IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1 },
[IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
};
static int ip_set_type(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
u8 family, min, max;
const char *typename;
int ret = 0;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_TYPENAME] ||
!attr[IPSET_ATTR_FAMILY]))
return -IPSET_ERR_PROTOCOL;
family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
ret = find_set_type_minmax(typename, family, &min, &max);
if (ret)
return ret;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb2)
return -ENOMEM;
nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0,
IPSET_CMD_TYPE);
if (!nlh2)
goto nlmsg_failure;
if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
nlmsg_failure:
kfree_skb(skb2);
return -EMSGSIZE;
}
/* Get protocol version */
static const struct nla_policy
ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
};
static int ip_set_protocol(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
if (unlikely(!attr[IPSET_ATTR_PROTOCOL]))
return -IPSET_ERR_PROTOCOL;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb2)
return -ENOMEM;
nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0,
IPSET_CMD_PROTOCOL);
if (!nlh2)
goto nlmsg_failure;
if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
goto nla_put_failure;
if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL_MIN, IPSET_PROTOCOL_MIN))
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
nlmsg_failure:
kfree_skb(skb2);
return -EMSGSIZE;
}
/* Get set by name or index, from userspace */
static int ip_set_byname(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
ip_set_id_t id = IPSET_INVALID_ID;
const struct ip_set *set;
if (unlikely(protocol_failed(attr) ||
!attr[IPSET_ATTR_SETNAME]))
return -IPSET_ERR_PROTOCOL;
set = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &id);
if (id == IPSET_INVALID_ID)
return -ENOENT;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb2)
return -ENOMEM;
nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0,
IPSET_CMD_GET_BYNAME);
if (!nlh2)
goto nlmsg_failure;
if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
nla_put_net16(skb2, IPSET_ATTR_INDEX, htons(id)))
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
nlmsg_failure:
kfree_skb(skb2);
return -EMSGSIZE;
}
static const struct nla_policy ip_set_index_policy[IPSET_ATTR_CMD_MAX + 1] = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
[IPSET_ATTR_INDEX] = { .type = NLA_U16 },
};
static int ip_set_byindex(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
struct ip_set_net *inst = ip_set_pernet(info->net);
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
ip_set_id_t id = IPSET_INVALID_ID;
const struct ip_set *set;
if (unlikely(protocol_failed(attr) ||
!attr[IPSET_ATTR_INDEX]))
return -IPSET_ERR_PROTOCOL;
id = ip_set_get_h16(attr[IPSET_ATTR_INDEX]);
if (id >= inst->ip_set_max)
return -ENOENT;
set = ip_set(inst, id);
if (set == NULL)
return -ENOENT;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb2)
return -ENOMEM;
nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0,
IPSET_CMD_GET_BYINDEX);
if (!nlh2)
goto nlmsg_failure;
if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name))
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
nlmsg_failure:
kfree_skb(skb2);
return -EMSGSIZE;
}
static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
[IPSET_CMD_NONE] = {
.call = ip_set_none,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
},
[IPSET_CMD_CREATE] = {
.call = ip_set_create,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_create_policy,
},
[IPSET_CMD_DESTROY] = {
.call = ip_set_destroy,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_setname_policy,
},
[IPSET_CMD_FLUSH] = {
.call = ip_set_flush,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_setname_policy,
},
[IPSET_CMD_RENAME] = {
.call = ip_set_rename,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_setname2_policy,
},
[IPSET_CMD_SWAP] = {
.call = ip_set_swap,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_setname2_policy,
},
[IPSET_CMD_LIST] = {
.call = ip_set_dump,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_dump_policy,
},
[IPSET_CMD_SAVE] = {
.call = ip_set_dump,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_setname_policy,
},
[IPSET_CMD_ADD] = {
.call = ip_set_uadd,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_adt_policy,
},
[IPSET_CMD_DEL] = {
.call = ip_set_udel,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_adt_policy,
},
[IPSET_CMD_TEST] = {
.call = ip_set_utest,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_adt_policy,
},
[IPSET_CMD_HEADER] = {
.call = ip_set_header,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_setname_policy,
},
[IPSET_CMD_TYPE] = {
.call = ip_set_type,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_type_policy,
},
[IPSET_CMD_PROTOCOL] = {
.call = ip_set_protocol,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_protocol_policy,
},
[IPSET_CMD_GET_BYNAME] = {
.call = ip_set_byname,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_setname_policy,
},
[IPSET_CMD_GET_BYINDEX] = {
.call = ip_set_byindex,
.type = NFNL_CB_MUTEX,
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_index_policy,
},
};
static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
.name = "ip_set",
.subsys_id = NFNL_SUBSYS_IPSET,
.cb_count = IPSET_MSG_MAX,
.cb = ip_set_netlink_subsys_cb,
};
/* Interface to iptables/ip6tables */
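/* Legacy getsockopt() based interface (SO_IP_SET), used by the
* iptables/ip6tables set extensions to translate between set names,
* indices and families: the request is copied in, dispatched on the
* operation code at the start of the buffer and the updated buffer is
* copied back to userspace.
*/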
static int
ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
{
unsigned int *op;
void *data;
int copylen = *len, ret = 0;
struct net *net = sock_net(sk);
struct ip_set_net *inst = ip_set_pernet(net);
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (optval != SO_IP_SET)
return -EBADF;
if (*len < sizeof(unsigned int))
return -EINVAL;
data = vmalloc(*len);
if (!data)
return -ENOMEM;
if (copy_from_user(data, user, *len) != 0) {
ret = -EFAULT;
goto done;
}
op = data;
if (*op < IP_SET_OP_VERSION) {
/* Check the version at the beginning of operations */
struct ip_set_req_version *req_version = data;
if (*len < sizeof(struct ip_set_req_version)) {
ret = -EINVAL;
goto done;
}
if (req_version->version < IPSET_PROTOCOL_MIN) {
ret = -EPROTO;
goto done;
}
}
switch (*op) {
case IP_SET_OP_VERSION: {
struct ip_set_req_version *req_version = data;
if (*len != sizeof(struct ip_set_req_version)) {
ret = -EINVAL;
goto done;
}
req_version->version = IPSET_PROTOCOL;
if (copy_to_user(user, req_version,
sizeof(struct ip_set_req_version)))
ret = -EFAULT;
goto done;
}
case IP_SET_OP_GET_BYNAME: {
struct ip_set_req_get_set *req_get = data;
ip_set_id_t id;
if (*len != sizeof(struct ip_set_req_get_set)) {
ret = -EINVAL;
goto done;
}
req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
nfnl_lock(NFNL_SUBSYS_IPSET);
find_set_and_id(inst, req_get->set.name, &id);
req_get->set.index = id;
nfnl_unlock(NFNL_SUBSYS_IPSET);
goto copy;
}
case IP_SET_OP_GET_FNAME: {
struct ip_set_req_get_set_family *req_get = data;
ip_set_id_t id;
if (*len != sizeof(struct ip_set_req_get_set_family)) {
ret = -EINVAL;
goto done;
}
req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
nfnl_lock(NFNL_SUBSYS_IPSET);
find_set_and_id(inst, req_get->set.name, &id);
req_get->set.index = id;
if (id != IPSET_INVALID_ID)
req_get->family = ip_set(inst, id)->family;
nfnl_unlock(NFNL_SUBSYS_IPSET);
goto copy;
}
case IP_SET_OP_GET_BYINDEX: {
struct ip_set_req_get_set *req_get = data;
struct ip_set *set;
if (*len != sizeof(struct ip_set_req_get_set) ||
req_get->set.index >= inst->ip_set_max) {
ret = -EINVAL;
goto done;
}
nfnl_lock(NFNL_SUBSYS_IPSET);
set = ip_set(inst, req_get->set.index);
ret = strscpy(req_get->set.name, set ? set->name : "",
IPSET_MAXNAMELEN);
nfnl_unlock(NFNL_SUBSYS_IPSET);
if (ret < 0)
goto done;
goto copy;
}
default:
ret = -EBADMSG;
goto done;
} /* end of switch(op) */
copy:
if (copy_to_user(user, data, copylen))
ret = -EFAULT;
done:
vfree(data);
if (ret > 0)
ret = 0;
return ret;
}
static struct nf_sockopt_ops so_set __read_mostly = {
.pf = PF_INET,
.get_optmin = SO_IP_SET,
.get_optmax = SO_IP_SET + 1,
.get = ip_set_sockfn_get,
.owner = THIS_MODULE,
};
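/* Per-netns setup: allocate the array of ip_set_max set pointers for
* the namespace (capped below IPSET_INVALID_ID); on namespace exit all
* remaining sets are destroyed under the nfnl mutex.
*/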
static int __net_init
ip_set_net_init(struct net *net)
{
struct ip_set_net *inst = ip_set_pernet(net);
struct ip_set **list;
inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX;
if (inst->ip_set_max >= IPSET_INVALID_ID)
inst->ip_set_max = IPSET_INVALID_ID - 1;
list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
return -ENOMEM;
inst->is_deleted = false;
inst->is_destroyed = false;
rcu_assign_pointer(inst->ip_set_list, list);
return 0;
}
static void __net_exit
ip_set_net_exit(struct net *net)
{
struct ip_set_net *inst = ip_set_pernet(net);
struct ip_set *set = NULL;
ip_set_id_t i;
inst->is_deleted = true; /* flag for ip_set_nfnl_put */
nfnl_lock(NFNL_SUBSYS_IPSET);
for (i = 0; i < inst->ip_set_max; i++) {
set = ip_set(inst, i);
if (set) {
ip_set(inst, i) = NULL;
ip_set_destroy_set(set);
}
}
nfnl_unlock(NFNL_SUBSYS_IPSET);
kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {
.init = ip_set_net_init,
.exit = ip_set_net_exit,
.id = &ip_set_net_id,
.size = sizeof(struct ip_set_net),
};
static int __init
ip_set_init(void)
{
int ret = register_pernet_subsys(&ip_set_net_ops);
if (ret) {
pr_err("ip_set: cannot register pernet_subsys.\n");
return ret;
}
ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
if (ret != 0) {
pr_err("ip_set: cannot register with nfnetlink.\n");
unregister_pernet_subsys(&ip_set_net_ops);
return ret;
}
ret = nf_register_sockopt(&so_set);
if (ret != 0) {
pr_err("SO_SET registry failed: %d\n", ret);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
unregister_pernet_subsys(&ip_set_net_ops);
return ret;
}
return 0;
}
static void __exit
ip_set_fini(void)
{
nf_unregister_sockopt(&so_set);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
unregister_pernet_subsys(&ip_set_net_ops);
pr_debug("these are the famous last words\n");
}
module_init(ip_set_init);
module_exit(ip_set_fini);
MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL));
| linux-master | net/netfilter/ipset/ip_set_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2000-2002 Joakim Axelsson <[email protected]>
* Patrick Schaaf <[email protected]>
* Martin Josefsson <[email protected]>
*/
/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 Counter support added */
/* 2 Comment support added */
#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:ip,mac");
#define MTYPE bitmap_ipmac
#define HOST_MASK 32
#define IP_SET_BITMAP_STORED_TIMEOUT
enum {
MAC_UNSET, /* element is set, without MAC */
MAC_FILLED, /* element is set with MAC */
};
/* Type structure */
struct bitmap_ipmac {
unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* maximum number of elements in the set */
size_t memsize; /* members size */
struct timer_list gc; /* garbage collector */
struct ip_set *set; /* attached to this ip_set */
unsigned char extensions[] /* MAC + data extensions */
__aligned(__alignof__(u64));
};
/* ADT structure for generic function args */
struct bitmap_ipmac_adt_elem {
unsigned char ether[ETH_ALEN] __aligned(2);
u16 id;
u16 add_mac;
};
struct bitmap_ipmac_elem {
unsigned char ether[ETH_ALEN];
unsigned char filled;
} __aligned(__alignof__(u64));
static u32
ip_to_id(const struct bitmap_ipmac *m, u32 ip)
{
return ip - m->first_ip;
}
#define get_elem(extensions, id, dsize) \
(struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
#define get_const_elem(extensions, id, dsize) \
(const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
/* Common functions */
static int
bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
const struct bitmap_ipmac *map, size_t dsize)
{
const struct bitmap_ipmac_elem *elem;
if (!test_bit(e->id, map->members))
return 0;
elem = get_const_elem(map->extensions, e->id, dsize);
if (e->add_mac && elem->filled == MAC_FILLED)
return ether_addr_equal(e->ether, elem->ether);
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;
}
static int
bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
{
const struct bitmap_ipmac_elem *elem;
if (!test_bit(id, map->members))
return 0;
elem = get_const_elem(map->extensions, id, dsize);
/* Timer not started for the incomplete elements */
return elem->filled == MAC_FILLED;
}
static int
bitmap_ipmac_is_filled(const struct bitmap_ipmac_elem *elem)
{
return elem->filled == MAC_FILLED;
}
static int
bitmap_ipmac_add_timeout(unsigned long *timeout,
const struct bitmap_ipmac_adt_elem *e,
const struct ip_set_ext *ext, struct ip_set *set,
struct bitmap_ipmac *map, int mode)
{
u32 t = ext->timeout;
if (mode == IPSET_ADD_START_STORED_TIMEOUT) {
if (t == set->timeout)
/* Timeout was not specified, get stored one */
t = *timeout;
ip_set_timeout_set(timeout, t);
} else {
/* If the MAC is not set yet, we store the plain timeout
* value because the timer is not activated yet;
* it can be reused later when the MAC is filled in,
* possibly by the kernel
*/
if (e->add_mac)
ip_set_timeout_set(timeout, t);
else
*timeout = t;
}
return 0;
}
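/* Add or update an element. An entry can exist in two states: added by
* IP only (MAC_UNSET, timer not started, timeout kept as a plain value)
* or with its MAC recorded (MAC_FILLED). The return value tells the
* generic bitmap code how to handle the timeout: start it from the
* stored value once the MAC of an existing entry is filled in, or keep
* storing a plain value while the MAC is still unknown.
*/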
static int
bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
struct bitmap_ipmac *map, u32 flags, size_t dsize)
{
struct bitmap_ipmac_elem *elem;
elem = get_elem(map->extensions, e->id, dsize);
if (test_bit(e->id, map->members)) {
if (elem->filled == MAC_FILLED) {
if (e->add_mac &&
(flags & IPSET_FLAG_EXIST) &&
!ether_addr_equal(e->ether, elem->ether)) {
/* memcpy isn't atomic */
clear_bit(e->id, map->members);
smp_mb__after_atomic();
ether_addr_copy(elem->ether, e->ether);
}
return IPSET_ADD_FAILED;
} else if (!e->add_mac)
/* Already added without ethernet address */
return IPSET_ADD_FAILED;
/* Fill the MAC address and trigger the timer activation */
clear_bit(e->id, map->members);
smp_mb__after_atomic();
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED;
return IPSET_ADD_START_STORED_TIMEOUT;
} else if (e->add_mac) {
/* We can store MAC too */
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED;
return 0;
}
elem->filled = MAC_UNSET;
/* MAC is not stored yet, don't start timer */
return IPSET_ADD_STORE_PLAIN_TIMEOUT;
}
static int
bitmap_ipmac_do_del(const struct bitmap_ipmac_adt_elem *e,
struct bitmap_ipmac *map)
{
return !test_and_clear_bit(e->id, map->members);
}
static int
bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
u32 id, size_t dsize)
{
const struct bitmap_ipmac_elem *elem =
get_const_elem(map->extensions, id, dsize);
return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id)) ||
(elem->filled == MAC_FILLED &&
nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, elem->ether));
}
static int
bitmap_ipmac_do_head(struct sk_buff *skb, const struct bitmap_ipmac *map)
{
return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
}
static int
bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;
ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
/* Backward compatibility: we don't check the second flag */
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
e.id = ip_to_id(map, ip);
if (opt->flags & IPSET_DIM_TWO_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
if (is_zero_ether_addr(e.ether))
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ipmac_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0;
int ret = 0;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
e.id = ip_to_id(map, ip);
if (tb[IPSET_ATTR_ETHER]) {
if (nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN)
return -IPSET_ERR_PROTOCOL;
memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
e.add_mac = 1;
}
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
static bool
bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct bitmap_ipmac *x = a->data;
const struct bitmap_ipmac *y = b->data;
return x->first_ip == y->first_ip &&
x->last_ip == y->last_ip &&
a->timeout == b->timeout &&
a->extensions == b->extensions;
}
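/* The type-independent add/del/test/list/destroy machinery is generated
* by including ip_set_bitmap_gen.h, parameterized by the MTYPE and
* HOST_MASK macros and the bitmap_ipmac_do_*() helpers above.
*/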
/* Plain variant */
#include "ip_set_bitmap_gen.h"
/* Create bitmap:ip,mac type of sets */
static bool
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
u32 first_ip, u32 last_ip, u32 elements)
{
map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
map->last_ip = last_ip;
map->elements = elements;
set->timeout = IPSET_NO_TIMEOUT;
map->set = set;
set->data = map;
set->family = NFPROTO_IPV4;
return true;
}
static int
bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
{
u32 first_ip = 0, last_ip = 0;
u64 elements;
struct bitmap_ipmac *map;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
if (ret)
return ret;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
if (ret)
return ret;
if (first_ip > last_ip)
swap(first_ip, last_ip);
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr >= HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(first_ip, last_ip, cidr);
} else {
return -IPSET_ERR_PROTOCOL;
}
elements = (u64)last_ip - first_ip + 1;
if (elements > IPSET_BITMAP_MAX_RANGE + 1)
return -IPSET_ERR_BITMAP_RANGE_SIZE;
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct bitmap_ipmac_elem),
__alignof__(struct bitmap_ipmac_elem));
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
if (!map)
return -ENOMEM;
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ipmac;
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
}
return 0;
}
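/* For illustration only (userspace side, not part of this module): a set
* of this type is typically managed with the ipset(8) tool, e.g.
*
* ipset create foo bitmap:ip,mac range 192.168.0.0/24 timeout 300
* ipset add foo 192.168.0.1,12:34:56:78:9a:bc
* ipset test foo 192.168.0.1
*
* which end up in bitmap_ipmac_create() and bitmap_ipmac_uadt() above.
*/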
static struct ip_set_type bitmap_ipmac_type = {
.name = "bitmap:ip,mac",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_IPV4,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create = bitmap_ipmac_create,
.create_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_ETHER] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
bitmap_ipmac_init(void)
{
return ip_set_type_register(&bitmap_ipmac_type);
}
static void __exit
bitmap_ipmac_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&bitmap_ipmac_type);
}
module_init(bitmap_ipmac_init);
module_exit(bitmap_ipmac_fini);
| linux-master | net/netfilter/ipset/ip_set_bitmap_ipmac.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:ip type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 Counters support */
/* 2 Comments support */
/* 3 Forceadd support */
/* 4 skbinfo support */
/* 5 bucketsize, initval support */
#define IPSET_TYPE_REV_MAX 6 /* bitmask support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip");
/* Type specific function prefix */
#define HTYPE hash_ip
#define IP_SET_HASH_WITH_NETMASK
#define IP_SET_HASH_WITH_BITMASK
/* IPv4 variant */
/* Member elements */
struct hash_ip4_elem {
/* Zero valued IP addresses cannot be stored */
__be32 ip;
};
/* Common functions */
static bool
hash_ip4_data_equal(const struct hash_ip4_elem *e1,
const struct hash_ip4_elem *e2,
u32 *multi)
{
return e1->ip == e2->ip;
}
static bool
hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *e)
{
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ip4_data_next(struct hash_ip4_elem *next, const struct hash_ip4_elem *e)
{
next->ip = e->ip;
}
#define MTYPE hash_ip4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_ip4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip4_elem e = { 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
__be32 ip;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
ip &= h->bitmask.ip;
if (ip == 0)
return -EINVAL;
e.ip = ip;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_ip4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip4_elem e = { 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, hosts, i = 0;
int ret = 0;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
ip &= ntohl(h->bitmask.ip);
e.ip = htonl(ip);
if (e.ip == 0)
return -IPSET_ERR_HASH_ELEM;
if (adt == IPSET_TEST)
return adtfn(set, &e, &ext, &ext, flags);
ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip > ip_to) {
if (ip_to == 0)
return -IPSET_ERR_HASH_ELEM;
swap(ip, ip_to);
}
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
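/* With a netmask narrower than /32 each network is represented by a
* single element, so step through the requested range one network
* (hosts addresses) at a time.
*/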
hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; i++) {
e.ip = htonl(ip);
if (i > IPSET_MAX_RANGE) {
hash_ip4_data_next(&h->next, &e);
return -ERANGE;
}
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ip += hosts;
if (ip == 0)
return 0;
ret = 0;
}
return ret;
}
/* IPv6 variant */
/* Member elements */
struct hash_ip6_elem {
union nf_inet_addr ip;
};
/* Common functions */
static bool
hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
const struct hash_ip6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6);
}
static bool
hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *e)
{
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ip6_data_next(struct hash_ip6_elem *next, const struct hash_ip6_elem *e)
{
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_ip6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_ip6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip6_elem e = { { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
nf_inet_addr_mask_inplace(&e.ip, &h->bitmask);
if (ipv6_addr_any(&e.ip.in6))
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_ip6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip6_elem e = { { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr != HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
nf_inet_addr_mask_inplace(&e.ip, &h->bitmask);
if (ipv6_addr_any(&e.ip.in6))
return -IPSET_ERR_HASH_ELEM;
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
static struct ip_set_type hash_ip_type __read_mostly = {
.name = "hash:ip",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
[IPSET_ATTR_BITMASK] = { .type = NLA_NESTED },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_ip_init(void)
{
return ip_set_type_register(&hash_ip_type);
}
static void __exit
hash_ip_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_ip_type);
}
module_init(hash_ip_init);
module_exit(hash_ip_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_ip.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Get Layer-4 data from the packets */
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/sctp.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/export.h>
/* We must handle non-linear skbs */
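/* Extract the source or destination port of the transport header at
* protooff; skb_header_pointer() copies the header into the local buffer
* when the skb is non-linear. For ICMP/ICMPv6 the type/code pair is
* packed into the port slot instead.
*/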
static bool
get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
bool src, __be16 *port, u8 *proto)
{
switch (protocol) {
case IPPROTO_TCP: {
struct tcphdr _tcph;
const struct tcphdr *th;
th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
if (!th)
/* No choice either */
return false;
*port = src ? th->source : th->dest;
break;
}
case IPPROTO_SCTP: {
struct sctphdr _sh;
const struct sctphdr *sh;
sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
if (!sh)
/* No choice either */
return false;
*port = src ? sh->source : sh->dest;
break;
}
case IPPROTO_UDP:
case IPPROTO_UDPLITE: {
struct udphdr _udph;
const struct udphdr *uh;
uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
if (!uh)
/* No choice either */
return false;
*port = src ? uh->source : uh->dest;
break;
}
case IPPROTO_ICMP: {
struct icmphdr _ich;
const struct icmphdr *ic;
ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
if (!ic)
return false;
*port = (__force __be16)htons((ic->type << 8) | ic->code);
break;
}
case IPPROTO_ICMPV6: {
struct icmp6hdr _ich;
const struct icmp6hdr *ic;
ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
if (!ic)
return false;
*port = (__force __be16)
htons((ic->icmp6_type << 8) | ic->icmp6_code);
break;
}
default:
break;
}
*proto = protocol;
return true;
}
bool
ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
__be16 *port, u8 *proto)
{
const struct iphdr *iph = ip_hdr(skb);
unsigned int protooff = skb_network_offset(skb) + ip_hdrlen(skb);
int protocol = iph->protocol;
/* See comments at tcp_match in ip_tables.c */
if (protocol <= 0)
return false;
if (ntohs(iph->frag_off) & IP_OFFSET)
switch (protocol) {
case IPPROTO_TCP:
case IPPROTO_SCTP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_ICMP:
/* Port info not available for fragment offset > 0 */
return false;
default:
/* Other protocols don't have ports,
* so we can match fragments.
*/
*proto = protocol;
return true;
}
return get_port(skb, protocol, protooff, src, port, proto);
}
EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
bool
ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
__be16 *port, u8 *proto)
{
int protoff;
u8 nexthdr;
__be16 frag_off = 0;
nexthdr = ipv6_hdr(skb)->nexthdr;
protoff = ipv6_skip_exthdr(skb,
skb_network_offset(skb) +
sizeof(struct ipv6hdr), &nexthdr,
&frag_off);
if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
return false;
return get_port(skb, nexthdr, protoff, src, port, proto);
}
EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
#endif
| linux-master | net/netfilter/ipset/ip_set_getport.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:ip,port,net type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 SCTP and UDPLITE support added */
/* 2 Range as input support for IPv4 added */
/* 3 nomatch flag support added */
/* 4 Counters support added */
/* 5 Comments support added */
/* 6 Forceadd support added */
/* 7 skbinfo support added */
#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port,net");
/* Type specific function prefix */
#define HTYPE hash_ipportnet
/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0.
* However, this way we have to store cidr - 1 internally,
* converting back and forth.
*/
#define IP_SET_HASH_WITH_NETS_PACKED
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS
/* IPv4 variant */
/* Member elements */
struct hash_ipportnet4_elem {
__be32 ip;
__be32 ip2;
__be16 port;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
};
/* Common functions */
static bool
hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
const struct hash_ipportnet4_elem *ip2,
u32 *multi)
{
return ip1->ip == ip2->ip &&
ip1->ip2 == ip2->ip2 &&
ip1->cidr == ip2->cidr &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static int
hash_ipportnet4_do_data_match(const struct hash_ipportnet4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_ipportnet4_data_set_flags(struct hash_ipportnet4_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
static void
hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
{
elem->ip2 &= ip_set_netmask(cidr);
elem->cidr = cidr - 1;
}
static bool
hash_ipportnet4_data_list(struct sk_buff *skb,
const struct hash_ipportnet4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next,
const struct hash_ipportnet4_elem *d)
{
next->ip = d->ip;
next->port = d->port;
next->ip2 = d->ip2;
}
#define MTYPE hash_ipportnet4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_ipportnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem e = {
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
e.ip2 &= ip_set_netmask(e.cidr + 1);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_ipportnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to = 0, ip2, i = 0;
bool with_ports = false;
u8 cidr;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR2]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
e.cidr = cidr - 1;
}
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
tb[IPSET_ATTR_IP2_TO])) {
e.ip = htonl(ip);
e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1));
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip > ip_to)
swap(ip, ip_to);
} else if (tb[IPSET_ATTR_CIDR]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
port_to = port = ntohs(e.port);
if (tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
}
ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
if (ret)
return ret;
if (ip2_from > ip2_to)
swap(ip2_from, ip2_to);
if (ip2_from + UINT_MAX == ip2_to)
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
}
if (retried) {
ip = ntohl(h->next.ip);
p = ntohs(h->next.port);
ip2 = ntohl(h->next.ip2);
} else {
p = port;
ip2 = ip2_from;
}
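/* Expand the (ip, port, ip2) ranges: the two outer loops step the first
* address and the port one by one, the innermost loop covers
* ip2..ip2_to with the largest possible CIDR blocks. If the expansion
* exceeds IPSET_MAX_RANGE, the current position is saved in h->next and
* -ERANGE makes call_ad() retry and resume from there.
*/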
for (; ip <= ip_to; ip++) {
e.ip = htonl(ip);
for (; p <= port_to; p++) {
e.port = htons(p);
do {
i++;
e.ip2 = htonl(ip2);
ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
e.cidr = cidr - 1;
if (i > IPSET_MAX_RANGE) {
hash_ipportnet4_data_next(&h->next,
&e);
return -ERANGE;
}
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
} while (ip2++ < ip2_to);
ip2 = ip2_from;
}
p = port;
}
return ret;
}
/* IPv6 variant */
struct hash_ipportnet6_elem {
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
};
/* Common functions */
static bool
hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
const struct hash_ipportnet6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) &&
ip1->cidr == ip2->cidr &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static int
hash_ipportnet6_do_data_match(const struct hash_ipportnet6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_ipportnet6_data_set_flags(struct hash_ipportnet6_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
static void
hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip2, cidr);
elem->cidr = cidr - 1;
}
static bool
hash_ipportnet6_data_list(struct sk_buff *skb,
const struct hash_ipportnet6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipportnet6_data_next(struct hash_ipportnet6_elem *next,
const struct hash_ipportnet6_elem *d)
{
next->port = d->port;
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_ipportnet6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_ipportnet6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem e = {
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
ip6_netmask(&e.ip2, e.cidr + 1);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_ipportnet6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
u8 cidr;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr != HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR2]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
e.cidr = cidr - 1;
}
ip6_netmask(&e.ip2, e.cidr + 1);
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
if (retried)
port = ntohs(h->next.port);
for (; port <= port_to; port++) {
e.port = htons(port);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
return ret;
}
static struct ip_set_type hash_ipportnet_type __read_mostly = {
.name = "hash:ip,port,net",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_THREE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_ipportnet_init(void)
{
return ip_set_type_register(&hash_ipportnet_type);
}
static void __exit
hash_ipportnet_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_ipportnet_type);
}
module_init(hash_ipportnet_init);
module_exit(hash_ipportnet_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_ipportnet.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:mac type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
#define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("hash:mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:mac");
/* Type specific function prefix */
#define HTYPE hash_mac
/* Member elements */
struct hash_mac4_elem {
/* Zero valued MAC addresses cannot be stored */
union {
unsigned char ether[ETH_ALEN];
__be32 foo[2];
};
};
/* Common functions */
static bool
hash_mac4_data_equal(const struct hash_mac4_elem *e1,
const struct hash_mac4_elem *e2,
u32 *multi)
{
return ether_addr_equal(e1->ether, e2->ether);
}
static bool
hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e)
{
if (nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_mac4_data_next(struct hash_mac4_elem *next,
const struct hash_mac4_elem *e)
{
}
#define MTYPE hash_mac4
#define HOST_MASK 32
#define IP_SET_EMIT_CREATE
#define IP_SET_PROTO_UNDEF
#include "ip_set_hash_gen.h"
static int
hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
if (opt->flags & IPSET_DIM_ONE_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
if (is_zero_ether_addr(e.ether))
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
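/* hash_mac4_kadt() above is the packet-path handler: the bounds check
 * ensures a full Ethernet header is present before eth_hdr() is used,
 * IPSET_DIM_ONE_SRC selects the source vs. destination MAC, and
 * all-zero MAC addresses are rejected instead of being matched or
 * stored.
 */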
static int
hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_ETHER] ||
nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
ether_addr_copy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]));
if (is_zero_ether_addr(e.ether))
return -IPSET_ERR_HASH_ELEM;
return adtfn(set, &e, &ext, &ext, flags);
}
static struct ip_set_type hash_mac_type __read_mostly = {
.name = "hash:mac",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_MAC,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_mac_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_ETHER] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_mac_init(void)
{
return ip_set_type_register(&hash_mac_type);
}
static void __exit
hash_mac_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_mac_type);
}
module_init(hash_mac_init);
module_exit(hash_mac_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_mac.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:net type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 Range as input support for IPv4 added */
/* 2 nomatch flag support added */
/* 3 Counters support added */
/* 4 Comments support added */
/* 5 Forceadd support added */
/* 6 skbinfo support added */
#define IPSET_TYPE_REV_MAX 7 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("hash:net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net");
/* Type specific function prefix */
#define HTYPE hash_net
#define IP_SET_HASH_WITH_NETS
/* IPv4 variant */
/* Member elements */
struct hash_net4_elem {
__be32 ip;
u16 padding0;
u8 nomatch;
u8 cidr;
};
/* Common functions */
static bool
hash_net4_data_equal(const struct hash_net4_elem *ip1,
const struct hash_net4_elem *ip2,
u32 *multi)
{
return ip1->ip == ip2->ip &&
ip1->cidr == ip2->cidr;
}
static int
hash_net4_do_data_match(const struct hash_net4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_net4_data_set_flags(struct hash_net4_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
static void
hash_net4_data_reset_flags(struct hash_net4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
elem->cidr = cidr;
}
static bool
hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_net4_data_next(struct hash_net4_elem *next,
const struct hash_net4_elem *d)
{
next->ip = d->ip;
}
#define MTYPE hash_net4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_net4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem e = {
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (e.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST)
e.cidr = HOST_MASK;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e.ip &= ip_set_netmask(e.cidr);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_net4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem e = { .cidr = HOST_MASK };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, i = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!e.cidr || e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
e.ip = htonl(ip & ip_set_hostmask(e.cidr));
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip_to < ip)
swap(ip, ip_to);
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
}
if (retried)
ip = ntohl(h->next.ip);
do {
i++;
e.ip = htonl(ip);
if (i > IPSET_MAX_RANGE) {
hash_net4_data_next(&h->next, &e);
return -ERANGE;
}
ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
} while (ip++ < ip_to);
return ret;
}
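/* Illustrative range expansion (assuming ip_set_range_to_cidr()
 * yields the largest aligned block starting at 'ip' that does not
 * pass 'ip_to'): adding 10.0.0.0-10.0.0.5 stores 10.0.0.0/30 plus
 * 10.0.0.4/31, while an exactly aligned range such as
 * 192.168.0.0-192.168.3.255 collapses to the single element
 * 192.168.0.0/22.
 */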
/* IPv6 variant */
struct hash_net6_elem {
union nf_inet_addr ip;
u16 padding0;
u8 nomatch;
u8 cidr;
};
/* Common functions */
static bool
hash_net6_data_equal(const struct hash_net6_elem *ip1,
const struct hash_net6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
ip1->cidr == ip2->cidr;
}
static int
hash_net6_do_data_match(const struct hash_net6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_net6_data_set_flags(struct hash_net6_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
static void
hash_net6_data_reset_flags(struct hash_net6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
elem->cidr = cidr;
}
static bool
hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_net6_data_next(struct hash_net6_elem *next,
const struct hash_net6_elem *d)
{
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_net6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_net6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem e = {
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (e.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST)
e.cidr = HOST_MASK;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6_netmask(&e.ip, e.cidr);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem e = { .cidr = HOST_MASK };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!e.cidr || e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ip6_netmask(&e.ip, e.cidr);
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
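/* Unlike the IPv4 variant above, the IPv6 add path refuses IP_TO
 * ranges outright (-IPSET_ERR_HASH_RANGE_UNSUPPORTED); only single
 * network/cidr elements can be added from userspace.
 */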
static struct ip_set_type hash_net_type __read_mostly = {
.name = "hash:net",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_net_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_net_init(void)
{
return ip_set_type_register(&hash_net_type);
}
static void __exit
hash_net_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_net_type);
}
module_init(hash_net_init);
module_exit(hash_net_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_net.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2000-2002 Joakim Axelsson <[email protected]>
* Patrick Schaaf <[email protected]>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]>
*/
/* Kernel module implementing an IP set type: the bitmap:ip type */
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 Counter support added */
/* 2 Comment support added */
#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:ip");
#define MTYPE bitmap_ip
#define HOST_MASK 32
/* Type structure */
struct bitmap_ip {
unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* maximum number of elements in the set */
u32 hosts; /* number of hosts in a subnet */
size_t memsize; /* members size */
u8 netmask; /* subnet netmask */
struct timer_list gc; /* garbage collection */
struct ip_set *set; /* attached to this ip_set */
unsigned char extensions[] /* data extensions */
__aligned(__alignof__(u64));
};
/* ADT structure for generic function args */
struct bitmap_ip_adt_elem {
u16 id;
};
static u32
ip_to_id(const struct bitmap_ip *m, u32 ip)
{
return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip) / m->hosts;
}
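/* Worked example: for a set covering 192.168.0.0-192.168.3.255
 * created with netmask 30, bitmap_ip_create() computes hosts = 4, so
 * ip_to_id() maps every address of a /30 block to the same bit:
 * 192.168.0.0-.3 -> id 0, 192.168.0.4-.7 -> id 1, and so on.
 */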
/* Common functions */
static int
bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e,
struct bitmap_ip *map, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
static int
bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map, size_t dsize)
{
return !!test_bit(id, map->members);
}
static int
bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
u32 flags, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
static int
bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
{
return !test_and_clear_bit(e->id, map->members);
}
static int
bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
size_t dsize)
{
return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id * map->hosts));
}
static int
bitmap_ip_do_head(struct sk_buff *skb, const struct bitmap_ip *map)
{
return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
(map->netmask != 32 &&
nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask));
}
static int
bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
struct bitmap_ip *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ip_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;
ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
e.id = ip_to_id(map, ip);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct bitmap_ip *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
u32 ip = 0, ip_to = 0;
struct bitmap_ip_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret = 0;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
if (adt == IPSET_TEST) {
e.id = ip_to_id(map, ip);
return adtfn(set, &e, &ext, &ext, flags);
}
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip > ip_to) {
swap(ip, ip_to);
if (ip < map->first_ip)
return -IPSET_ERR_BITMAP_RANGE;
}
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
} else {
ip_to = ip;
}
if (ip_to > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
for (; !before(ip_to, ip); ip += map->hosts) {
e.id = ip_to_id(map, ip);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
return ret;
}
static bool
bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct bitmap_ip *x = a->data;
const struct bitmap_ip *y = b->data;
return x->first_ip == y->first_ip &&
x->last_ip == y->last_ip &&
x->netmask == y->netmask &&
a->timeout == b->timeout &&
a->extensions == b->extensions;
}
/* Plain variant */
struct bitmap_ip_elem {
};
#include "ip_set_bitmap_gen.h"
/* Create bitmap:ip type of sets */
static bool
init_map_ip(struct ip_set *set, struct bitmap_ip *map,
u32 first_ip, u32 last_ip,
u32 elements, u32 hosts, u8 netmask)
{
map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
map->last_ip = last_ip;
map->elements = elements;
map->hosts = hosts;
map->netmask = netmask;
set->timeout = IPSET_NO_TIMEOUT;
map->set = set;
set->data = map;
set->family = NFPROTO_IPV4;
return true;
}
static u32
range_to_mask(u32 from, u32 to, u8 *bits)
{
u32 mask = 0xFFFFFFFE;
*bits = 32;
while (--(*bits) > 0 && mask && (to & mask) != from)
mask <<= 1;
return mask;
}
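/* Illustrative trace of range_to_mask(): for first_ip 192.168.0.0 and
 * last_ip 192.168.3.255 the loop stops at mask 0xFFFFFC00 with *bits
 * set to 22, i.e. the range is the prefix 192.168.0.0/22.  A zero
 * return value (no prefix with first_ip as its base contains last_ip)
 * is rejected by bitmap_ip_create() below unless the range is the
 * whole address space.
 */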
static int
bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
{
struct bitmap_ip *map;
u32 first_ip = 0, last_ip = 0, hosts;
u64 elements;
u8 netmask = 32;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
if (ret)
return ret;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
if (ret)
return ret;
if (first_ip > last_ip)
swap(first_ip, last_ip);
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr >= HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(first_ip, last_ip, cidr);
} else {
return -IPSET_ERR_PROTOCOL;
}
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
if (netmask > HOST_MASK)
return -IPSET_ERR_INVALID_NETMASK;
first_ip &= ip_set_hostmask(netmask);
last_ip |= ~ip_set_hostmask(netmask);
}
if (netmask == 32) {
hosts = 1;
elements = (u64)last_ip - first_ip + 1;
} else {
u8 mask_bits;
u32 mask;
mask = range_to_mask(first_ip, last_ip, &mask_bits);
if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
netmask <= mask_bits)
return -IPSET_ERR_BITMAP_RANGE;
pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
hosts = 2U << (32 - netmask - 1);
elements = 2UL << (netmask - mask_bits - 1);
}
if (elements > IPSET_BITMAP_MAX_RANGE + 1)
return -IPSET_ERR_BITMAP_RANGE_SIZE;
pr_debug("hosts %u, elements %llu\n",
hosts, (unsigned long long)elements);
set->dsize = ip_set_elem_len(set, tb, 0, 0);
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
if (!map)
return -ENOMEM;
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ip;
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
bitmap_ip_gc_init(set, bitmap_ip_gc);
}
return 0;
}
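/* Sizing example for the create path above (standard ipset(8) syntax
 * assumed): "ipset create foo bitmap:ip range 10.0.0.0/16 netmask 24"
 * yields mask_bits 16, hosts 256 and elements 256, so the members
 * bitmap takes BITS_TO_LONGS(256) longs and the extensions area
 * 256 * set->dsize bytes.
 */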
static struct ip_set_type bitmap_ip_type __read_mostly = {
.name = "bitmap:ip",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_IPV4,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create = bitmap_ip_create,
.create_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
bitmap_ip_init(void)
{
return ip_set_type_register(&bitmap_ip_type);
}
static void __exit
bitmap_ip_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&bitmap_ip_type);
}
module_init(bitmap_ip_init);
module_exit(bitmap_ip_fini);
| linux-master | net/netfilter/ipset/ip_set_bitmap_ip.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:net,port,net type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 0 Comments support added */
/* 1 Forceadd support added */
/* 2 skbinfo support added */
#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <[email protected]>");
IP_SET_MODULE_DESC("hash:net,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,port,net");
/* Type specific function prefix */
#define HTYPE hash_netportnet
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS
#define IPSET_NET_COUNT 2
#define IP_SET_HASH_WITH_NET0
/* IPv4 variant */
/* Member elements */
struct hash_netportnet4_elem {
union {
__be32 ip[2];
__be64 ipcmp;
};
__be16 port;
union {
u8 cidr[2];
u16 ccmp;
};
u16 padding;
u8 nomatch;
u8 proto;
};
/* Common functions */
static bool
hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
const struct hash_netportnet4_elem *ip2,
u32 *multi)
{
return ip1->ipcmp == ip2->ipcmp &&
ip1->ccmp == ip2->ccmp &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static int
hash_netportnet4_do_data_match(const struct hash_netportnet4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_netportnet4_data_set_flags(struct hash_netportnet4_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
static void
hash_netportnet4_data_reset_flags(struct hash_netportnet4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
struct hash_netportnet4_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
static void
hash_netportnet4_data_netmask(struct hash_netportnet4_elem *elem,
u8 cidr, bool inner)
{
if (inner) {
elem->ip[1] &= ip_set_netmask(cidr);
elem->cidr[1] = cidr;
} else {
elem->ip[0] &= ip_set_netmask(cidr);
elem->cidr[0] = cidr;
}
}
static bool
hash_netportnet4_data_list(struct sk_buff *skb,
const struct hash_netportnet4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
const struct hash_netportnet4_elem *d)
{
next->ipcmp = d->ipcmp;
next->port = d->port;
}
#define MTYPE hash_netportnet4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static void
hash_netportnet4_init(struct hash_netportnet4_elem *e)
{
e->cidr[0] = HOST_MASK;
e->cidr[1] = HOST_MASK;
}
static int
hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netportnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
if (adt == IPSET_TEST)
e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
e.ip[0] &= ip_set_netmask(e.cidr[0]);
e.ip[1] &= ip_set_netmask(e.cidr[1]);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static u32
hash_netportnet4_range_to_cidr(u32 from, u32 to, u8 *cidr)
{
if (from == 0 && to == UINT_MAX) {
*cidr = 0;
return to;
}
return ip_set_range_to_cidr(from, to, cidr);
}
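/* The helper above only special-cases the full IPv4 range
 * 0.0.0.0-255.255.255.255, turning it into a single /0 element (this
 * type defines IP_SET_HASH_WITH_NET0); any other range is split by
 * the generic ip_set_range_to_cidr().
 */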
static int
hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_netportnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to = 0, ip2, i = 0;
bool with_ports = false;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
hash_netportnet4_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) {
e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip > ip_to)
swap(ip, ip_to);
if (unlikely(ip + UINT_MAX == ip_to))
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
}
port_to = port = ntohs(e.port);
if (tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
}
ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
if (ret)
return ret;
if (ip2_from > ip2_to)
swap(ip2_from, ip2_to);
if (unlikely(ip2_from + UINT_MAX == ip2_to))
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
}
if (retried) {
ip = ntohl(h->next.ip[0]);
p = ntohs(h->next.port);
ip2 = ntohl(h->next.ip[1]);
} else {
p = port;
ip2 = ip2_from;
}
do {
e.ip[0] = htonl(ip);
ip = hash_netportnet4_range_to_cidr(ip, ip_to, &e.cidr[0]);
for (; p <= port_to; p++) {
e.port = htons(p);
do {
i++;
e.ip[1] = htonl(ip2);
if (i > IPSET_MAX_RANGE) {
hash_netportnet4_data_next(&h->next,
&e);
return -ERANGE;
}
ip2 = hash_netportnet4_range_to_cidr(ip2,
ip2_to, &e.cidr[1]);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
} while (ip2++ < ip2_to);
ip2 = ip2_from;
}
p = port;
} while (ip++ < ip_to);
return ret;
}
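/* The add path above expands three dimensions in nested loops: outer
 * network blocks, ports, then inner network blocks.  The inner-block
 * counter 'i' caps the total work at IPSET_MAX_RANGE (-ERANGE), and
 * h->next records where a retried request should resume.
 */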
/* IPv6 variant */
struct hash_netportnet6_elem {
union nf_inet_addr ip[2];
__be16 port;
union {
u8 cidr[2];
u16 ccmp;
};
u16 padding;
u8 nomatch;
u8 proto;
};
/* Common functions */
static bool
hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
const struct hash_netportnet6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
ip1->ccmp == ip2->ccmp &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static int
hash_netportnet6_do_data_match(const struct hash_netportnet6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_netportnet6_data_set_flags(struct hash_netportnet6_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
static void
hash_netportnet6_data_reset_flags(struct hash_netportnet6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
struct hash_netportnet6_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
static void
hash_netportnet6_data_netmask(struct hash_netportnet6_elem *elem,
u8 cidr, bool inner)
{
if (inner) {
ip6_netmask(&elem->ip[1], cidr);
elem->cidr[1] = cidr;
} else {
ip6_netmask(&elem->ip[0], cidr);
elem->cidr[0] = cidr;
}
}
static bool
hash_netportnet6_data_list(struct sk_buff *skb,
const struct hash_netportnet6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_netportnet6_data_next(struct hash_netportnet6_elem *next,
const struct hash_netportnet6_elem *d)
{
next->port = d->port;
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_netportnet6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static void
hash_netportnet6_init(struct hash_netportnet6_elem *e)
{
e->cidr[0] = HOST_MASK;
e->cidr[1] = HOST_MASK;
}
static int
hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netportnet6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
if (adt == IPSET_TEST)
e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6);
ip6_netmask(&e.ip[0], e.cidr[0]);
ip6_netmask(&e.ip[1], e.cidr[1]);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_netportnet6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
hash_netportnet6_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
if (ret)
return ret;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ip6_netmask(&e.ip[0], e.cidr[0]);
ip6_netmask(&e.ip[1], e.cidr[1]);
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
if (retried)
port = ntohs(h->next.port);
for (; port <= port_to; port++) {
e.port = htons(port);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
return ret;
}
static struct ip_set_type hash_netportnet_type __read_mostly = {
.name = "hash:net,port,net",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_THREE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_netportnet_init(void)
{
return ip_set_type_register(&hash_netportnet_type);
}
static void __exit
hash_netportnet_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_netportnet_type);
}
module_init(hash_netportnet_init);
module_exit(hash_netportnet_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_netportnet.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2016 Tomasz Chilinski <[email protected]>
*/
/* Kernel module implementing an IP set type: the hash:ip,mac type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
#define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Chilinski <[email protected]>");
IP_SET_MODULE_DESC("hash:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,mac");
/* Type specific function prefix */
#define HTYPE hash_ipmac
/* IPv4 variant */
/* Member elements */
struct hash_ipmac4_elem {
/* Zero valued IP addresses cannot be stored */
__be32 ip;
union {
unsigned char ether[ETH_ALEN];
__be32 foo[2];
};
};
/* Common functions */
static bool
hash_ipmac4_data_equal(const struct hash_ipmac4_elem *e1,
const struct hash_ipmac4_elem *e2,
u32 *multi)
{
return e1->ip == e2->ip && ether_addr_equal(e1->ether, e2->ether);
}
static bool
hash_ipmac4_data_list(struct sk_buff *skb, const struct hash_ipmac4_elem *e)
{
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip) ||
nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipmac4_data_next(struct hash_ipmac4_elem *next,
const struct hash_ipmac4_elem *e)
{
next->ip = e->ip;
}
#define MTYPE hash_ipmac4
#define PF 4
#define HOST_MASK 32
#define HKEY_DATALEN sizeof(struct hash_ipmac4_elem)
#include "ip_set_hash_gen.h"
static int
hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
if (opt->flags & IPSET_DIM_TWO_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
if (is_zero_ether_addr(e.ether))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipmac4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!tb[IPSET_ATTR_ETHER] ||
nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
if (is_zero_ether_addr(e.ether))
return -IPSET_ERR_HASH_ELEM;
return adtfn(set, &e, &ext, &ext, flags);
}
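/* Illustrative userspace usage (standard ipset(8) syntax assumed):
 * "ipset create machines hash:ip,mac" and
 * "ipset add machines 192.168.1.10,12:34:56:78:9a:bc".  The handler
 * above insists on both the IP and a 6-byte ETHER attribute and
 * refuses the all-zero MAC address.
 */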
/* IPv6 variant */
/* Member elements */
struct hash_ipmac6_elem {
/* Zero valued IP addresses cannot be stored */
union nf_inet_addr ip;
union {
unsigned char ether[ETH_ALEN];
__be32 foo[2];
};
};
/* Common functions */
static bool
hash_ipmac6_data_equal(const struct hash_ipmac6_elem *e1,
const struct hash_ipmac6_elem *e2,
u32 *multi)
{
return ipv6_addr_equal(&e1->ip.in6, &e2->ip.in6) &&
ether_addr_equal(e1->ether, e2->ether);
}
static bool
hash_ipmac6_data_list(struct sk_buff *skb, const struct hash_ipmac6_elem *e)
{
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipmac6_data_next(struct hash_ipmac6_elem *next,
const struct hash_ipmac6_elem *e)
{
}
#undef MTYPE
#undef PF
#undef HOST_MASK
#undef HKEY_DATALEN
#define MTYPE hash_ipmac6
#define PF 6
#define HOST_MASK 128
#define HKEY_DATALEN sizeof(struct hash_ipmac6_elem)
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmac6_elem e = {
{ .all = { 0 } },
{ .foo[0] = 0, .foo[1] = 0 }
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
if (opt->flags & IPSET_DIM_TWO_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
if (is_zero_ether_addr(e.ether))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipmac6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmac6_elem e = {
{ .all = { 0 } },
{ .foo[0] = 0, .foo[1] = 0 }
};
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!tb[IPSET_ATTR_ETHER] ||
nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
if (is_zero_ether_addr(e.ether))
return -IPSET_ERR_HASH_ELEM;
return adtfn(set, &e, &ext, &ext, flags);
}
static struct ip_set_type hash_ipmac_type __read_mostly = {
.name = "hash:ip,mac",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipmac_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_ETHER] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_ipmac_init(void)
{
return ip_set_type_register(&hash_ipmac_type);
}
static void __exit
hash_ipmac_fini(void)
{
ip_set_type_unregister(&hash_ipmac_type);
}
module_init(hash_ipmac_init);
module_exit(hash_ipmac_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_ipmac.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the bitmap:port type */
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 Counter support added */
/* 2 Comment support added */
#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("bitmap:port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:port");
#define MTYPE bitmap_port
/* Type structure */
struct bitmap_port {
unsigned long *members; /* the set members */
u16 first_port; /* host byte order, included in range */
u16 last_port; /* host byte order, included in range */
u32 elements; /* maximum number of elements in the set */
size_t memsize; /* members size */
struct timer_list gc; /* garbage collection */
struct ip_set *set; /* attached to this ip_set */
unsigned char extensions[] /* data extensions */
__aligned(__alignof__(u64));
};
/* ADT structure for generic function args */
struct bitmap_port_adt_elem {
u16 id;
};
static u16
port_to_id(const struct bitmap_port *m, u16 port)
{
return port - m->first_port;
}
/* Common functions */
static int
bitmap_port_do_test(const struct bitmap_port_adt_elem *e,
const struct bitmap_port *map, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
static int
bitmap_port_gc_test(u16 id, const struct bitmap_port *map, size_t dsize)
{
return !!test_bit(id, map->members);
}
static int
bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
struct bitmap_port *map, u32 flags, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
static int
bitmap_port_do_del(const struct bitmap_port_adt_elem *e,
struct bitmap_port *map)
{
return !test_and_clear_bit(e->id, map->members);
}
static int
bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
size_t dsize)
{
return nla_put_net16(skb, IPSET_ATTR_PORT,
htons(map->first_port + id));
}
static int
bitmap_port_do_head(struct sk_buff *skb, const struct bitmap_port *map)
{
return nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
}
static bool
ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
{
bool ret;
u8 proto;
switch (pf) {
case NFPROTO_IPV4:
ret = ip_set_get_ip4_port(skb, src, port, &proto);
break;
case NFPROTO_IPV6:
ret = ip_set_get_ip6_port(skb, src, port, &proto);
break;
default:
return false;
}
if (!ret)
return ret;
switch (proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
return true;
default:
return false;
}
}
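/* Only TCP and UDP packets yield a usable port from the helper above;
 * any other protocol makes bitmap_port_kadt() below bail out with
 * -EINVAL before touching the bitmap.
 */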
static int
bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
struct bitmap_port *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_port_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
__be16 __port;
u16 port = 0;
if (!ip_set_get_ip_port(skb, opt->family,
opt->flags & IPSET_DIM_ONE_SRC, &__port))
return -EINVAL;
port = ntohs(__port);
if (port < map->first_port || port > map->last_port)
return -IPSET_ERR_BITMAP_RANGE;
e.id = port_to_id(map, port);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct bitmap_port *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_port_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port; /* wraparound: u32 lets port++ pass 65535 and end the loop */
u16 port_to;
int ret = 0;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
return -IPSET_ERR_PROTOCOL;
port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
if (port < map->first_port || port > map->last_port)
return -IPSET_ERR_BITMAP_RANGE;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (adt == IPSET_TEST) {
e.id = port_to_id(map, port);
return adtfn(set, &e, &ext, &ext, flags);
}
if (tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to) {
swap(port, port_to);
if (port < map->first_port)
return -IPSET_ERR_BITMAP_RANGE;
}
} else {
port_to = port;
}
if (port_to > map->last_port)
return -IPSET_ERR_BITMAP_RANGE;
for (; port <= port_to; port++) {
e.id = port_to_id(map, port);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
return ret;
}
static bool
bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct bitmap_port *x = a->data;
const struct bitmap_port *y = b->data;
return x->first_port == y->first_port &&
x->last_port == y->last_port &&
a->timeout == b->timeout &&
a->extensions == b->extensions;
}
/* Plain variant */
struct bitmap_port_elem {
};
#include "ip_set_bitmap_gen.h"
/* Create bitmap:port type of sets */
static bool
init_map_port(struct ip_set *set, struct bitmap_port *map,
u16 first_port, u16 last_port)
{
map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_port = first_port;
map->last_port = last_port;
set->timeout = IPSET_NO_TIMEOUT;
map->set = set;
set->data = map;
set->family = NFPROTO_UNSPEC;
return true;
}
static int
bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
{
struct bitmap_port *map;
u16 first_port, last_port;
u32 elements;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (first_port > last_port)
swap(first_port, last_port);
elements = last_port - first_port + 1;
set->dsize = ip_set_elem_len(set, tb, 0, 0);
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
if (!map)
return -ENOMEM;
map->elements = elements;
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_port;
if (!init_map_port(set, map, first_port, last_port)) {
ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
bitmap_port_gc_init(set, bitmap_port_gc);
}
return 0;
}
static struct ip_set_type bitmap_port_type = {
.name = "bitmap:port",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_PORT,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create = bitmap_port_create,
.create_policy = {
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
bitmap_port_init(void)
{
return ip_set_type_register(&bitmap_port_type);
}
static void __exit
bitmap_port_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&bitmap_port_type);
}
module_init(bitmap_port_init);
module_exit(bitmap_port_fini);
| linux-master | net/netfilter/ipset/ip_set_bitmap_port.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2011-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:net,iface type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 nomatch flag support added */
/* 2 /0 support added */
/* 3 Counters support added */
/* 4 Comments support added */
/* 5 Forceadd support added */
/* 6 skbinfo support added */
/* 7 interface wildcard support added */
#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,iface");
/* Type specific function prefix */
#define HTYPE hash_netiface
#define IP_SET_HASH_WITH_NETS
#define IP_SET_HASH_WITH_MULTI
#define IP_SET_HASH_WITH_NET0
#define STRSCPY(a, b) strscpy(a, b, IFNAMSIZ)
/* IPv4 variant */
struct hash_netiface4_elem_hashed {
__be32 ip;
u8 physdev;
u8 cidr;
u8 nomatch;
u8 elem;
};
/* Member elements */
struct hash_netiface4_elem {
__be32 ip;
u8 physdev;
u8 cidr;
u8 nomatch;
u8 elem;
u8 wildcard;
char iface[IFNAMSIZ];
};
/* Common functions */
static bool
hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
const struct hash_netiface4_elem *ip2,
u32 *multi)
{
return ip1->ip == ip2->ip &&
ip1->cidr == ip2->cidr &&
(++*multi) &&
ip1->physdev == ip2->physdev &&
(ip1->wildcard ?
strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 :
strcmp(ip1->iface, ip2->iface) == 0);
}
static int
hash_netiface4_do_data_match(const struct hash_netiface4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_netiface4_data_set_flags(struct hash_netiface4_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
static void
hash_netiface4_data_reset_flags(struct hash_netiface4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
elem->cidr = cidr;
}
static bool
hash_netiface4_data_list(struct sk_buff *skb,
const struct hash_netiface4_elem *data)
{
u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) |
(data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0);
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_netiface4_data_next(struct hash_netiface4_elem *next,
const struct hash_netiface4_elem *d)
{
next->ip = d->ip;
}
#define MTYPE hash_netiface4
#define HOST_MASK 32
#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
#include "ip_set_hash_gen.h"
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static const char *get_physindev_name(const struct sk_buff *skb)
{
struct net_device *dev = nf_bridge_get_physindev(skb);
return dev ? dev->name : NULL;
}
static const char *get_physoutdev_name(const struct sk_buff *skb)
{
struct net_device *dev = nf_bridge_get_physoutdev(skb);
return dev ? dev->name : NULL;
}
#endif
static int
hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
struct hash_netiface4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface4_elem e = {
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
.elem = 1,
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e.ip &= ip_set_netmask(e.cidr);
#define IFACE(dir) (par->state->dir ? par->state->dir->name : "")
#define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC)
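/* SRCDIR selects the source/destination side of the second (interface)
 * dimension; IFACE() falls back to an empty string when the hook state
 * carries no device for that direction.
 */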
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
const char *eiface = SRCDIR ? get_physindev_name(skb) :
get_physoutdev_name(skb);
if (!eiface)
return -EINVAL;
STRSCPY(e.iface, eiface);
e.physdev = 1;
#endif
} else {
STRSCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out));
}
if (strlen(e.iface) == 0)
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_netiface4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, i = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!tb[IPSET_ATTR_IFACE] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV)
e.physdev = 1;
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD)
e.wildcard = 1;
}
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
e.ip = htonl(ip & ip_set_hostmask(e.cidr));
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip_to < ip)
swap(ip, ip_to);
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr);
}
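/* Cover the range with the smallest possible sequence of CIDR blocks.
 * Expanding to more than IPSET_MAX_RANGE elements aborts with -ERANGE;
 * on a retry (e.g. after a resize) iteration resumes from h->next.
 */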
if (retried)
ip = ntohl(h->next.ip);
do {
i++;
e.ip = htonl(ip);
if (i > IPSET_MAX_RANGE) {
hash_netiface4_data_next(&h->next, &e);
return -ERANGE;
}
ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
} while (ip++ < ip_to);
return ret;
}
/* IPv6 variant */
struct hash_netiface6_elem_hashed {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
u8 nomatch;
u8 elem;
};
struct hash_netiface6_elem {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
u8 nomatch;
u8 elem;
u8 wildcard;
char iface[IFNAMSIZ];
};
/* Common functions */
static bool
hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
const struct hash_netiface6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
ip1->cidr == ip2->cidr &&
(++*multi) &&
ip1->physdev == ip2->physdev &&
(ip1->wildcard ?
strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 :
strcmp(ip1->iface, ip2->iface) == 0);
}
static int
hash_netiface6_do_data_match(const struct hash_netiface6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_netiface6_data_set_flags(struct hash_netiface6_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
static void
hash_netiface6_data_reset_flags(struct hash_netiface6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
elem->cidr = cidr;
}
static bool
hash_netiface6_data_list(struct sk_buff *skb,
const struct hash_netiface6_elem *data)
{
u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) |
(data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0);
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_netiface6_data_next(struct hash_netiface6_elem *next,
const struct hash_netiface6_elem *d)
{
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_netiface6
#define HOST_MASK 128
#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
struct hash_netiface6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface6_elem e = {
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
.elem = 1,
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6_netmask(&e.ip, e.cidr);
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
const char *eiface = SRCDIR ? get_physindev_name(skb) :
get_physoutdev_name(skb);
if (!eiface)
return -EINVAL;
STRSCPY(e.iface, eiface);
e.physdev = 1;
#endif
} else {
STRSCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out));
}
if (strlen(e.iface) == 0)
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!tb[IPSET_ATTR_IFACE] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ip6_netmask(&e.ip, e.cidr);
nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV)
e.physdev = 1;
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD)
e.wildcard = 1;
}
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
static struct ip_set_type hash_netiface_type __read_mostly = {
.name = "hash:net,iface",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_IFACE |
IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netiface_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING,
.len = IFNAMSIZ - 1 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_netiface_init(void)
{
return ip_set_type_register(&hash_netiface_type);
}
static void __exit
hash_netiface_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_netiface_type);
}
module_init(hash_netiface_init);
module_exit(hash_netiface_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_netiface.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2008-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the list:set type */
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_list.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 Counters support added */
/* 2 Comments support added */
#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_list:set");
/* Member elements */
struct set_elem {
struct rcu_head rcu;
struct list_head list;
struct ip_set *set; /* Sigh, in order to clean up the reference */
ip_set_id_t id;
} __aligned(__alignof__(u64));
struct set_adt_elem {
ip_set_id_t id;
ip_set_id_t refid;
int before;
};
/* Type structure */
struct list_set {
u32 size; /* size of set list array */
struct timer_list gc; /* garbage collection */
struct ip_set *set; /* attached to this ip_set */
struct net *net; /* namespace */
struct list_head members; /* the set members */
};
static int
list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
struct list_set *map = set->data;
struct ip_set_ext *mext = &opt->ext;
struct set_elem *e;
u32 flags = opt->cmdflags;
int ret;
/* Don't lookup sub-counters at all */
opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
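/* Test the member sets in their configured order and report a match as
 * soon as one of them matches with the required extensions.
 */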
list_for_each_entry_rcu(e, &map->members, list) {
ret = ip_set_test(e->id, skb, par, opt);
if (ret <= 0)
continue;
if (ip_set_match_extensions(set, ext, mext, flags, e))
return 1;
}
return 0;
}
static int
list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
struct list_set *map = set->data;
struct set_elem *e;
int ret;
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
ret = ip_set_add(e->id, skb, par, opt);
if (ret == 0)
return ret;
}
return 0;
}
static int
list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
struct list_set *map = set->data;
struct set_elem *e;
int ret;
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
ret = ip_set_del(e->id, skb, par, opt);
if (ret == 0)
return ret;
}
return 0;
}
static int
list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
int ret = -EINVAL;
rcu_read_lock();
switch (adt) {
case IPSET_TEST:
ret = list_set_ktest(set, skb, par, opt, &ext);
break;
case IPSET_ADD:
ret = list_set_kadd(set, skb, par, opt, &ext);
break;
case IPSET_DEL:
ret = list_set_kdel(set, skb, par, opt, &ext);
break;
default:
break;
}
rcu_read_unlock();
return ret;
}
/* Userspace interfaces: we are protected by the nfnl mutex */
static void
__list_set_del_rcu(struct rcu_head *rcu)
{
struct set_elem *e = container_of(rcu, struct set_elem, rcu);
struct ip_set *set = e->set;
ip_set_ext_destroy(set, e);
kfree(e);
}
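/* Elements are unlinked with list_del_rcu()/list_replace_rcu() and freed
 * only after a grace period, so lockless readers walking the member list
 * in the kadt path remain safe.
 */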
static void
list_set_del(struct ip_set *set, struct set_elem *e)
{
struct list_set *map = set->data;
set->elements--;
list_del_rcu(&e->list);
ip_set_put_byindex(map->net, e->id);
call_rcu(&e->rcu, __list_set_del_rcu);
}
static void
list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
struct list_set *map = set->data;
list_replace_rcu(&old->list, &e->list);
ip_set_put_byindex(map->net, old->id);
call_rcu(&old->rcu, __list_set_del_rcu);
}
static void
set_cleanup_entries(struct ip_set *set)
{
struct list_set *map = set->data;
struct set_elem *e, *n;
list_for_each_entry_safe(e, n, &map->members, list)
if (ip_set_timeout_expired(ext_timeout(e, set)))
list_set_del(set, e);
}
static int
list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e, *next, *prev = NULL;
int ret;
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
else if (e->id != d->id) {
prev = e;
continue;
}
if (d->before == 0) {
ret = 1;
} else if (d->before > 0) {
next = list_next_entry(e, list);
ret = !list_is_last(&e->list, &map->members) &&
next->id == d->refid;
} else {
ret = prev && prev->id == d->refid;
}
return ret;
}
return 0;
}
static void
list_set_init_extensions(struct ip_set *set, const struct ip_set_ext *ext,
struct set_elem *e)
{
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(e, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(set, ext_comment(e, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
/* Update timeout last */
if (SET_WITH_TIMEOUT(set))
ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
}
static int
list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e, *n, *prev, *next;
bool flag_exist = flags & IPSET_FLAG_EXIST;
/* Find where to add the new entry */
n = prev = next = NULL;
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
else if (d->id == e->id)
n = e;
else if (d->before == 0 || e->id != d->refid)
continue;
else if (d->before > 0)
next = e;
else
prev = e;
}
/* The referenced before/after element was not found (e.g. the set is empty) */
if ((d->before > 0 && !next) ||
(d->before < 0 && !prev))
return -IPSET_ERR_REF_EXIST;
/* Re-add already existing element */
if (n) {
if (!flag_exist)
return -IPSET_ERR_EXIST;
/* Update extensions */
ip_set_ext_destroy(set, n);
list_set_init_extensions(set, ext, n);
/* Set is already added to the list */
ip_set_put_byindex(map->net, d->id);
return 0;
}
/* Add new entry */
if (d->before == 0) {
/* Append */
n = list_empty(&map->members) ? NULL :
list_last_entry(&map->members, struct set_elem, list);
} else if (d->before > 0) {
/* Insert after next element */
if (!list_is_last(&next->list, &map->members))
n = list_next_entry(next, list);
} else {
/* Insert before prev element */
if (prev->list.prev != &map->members)
n = list_prev_entry(prev, list);
}
/* Can we replace a timed out entry? */
if (n &&
!(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(n, set))))
n = NULL;
e = kzalloc(set->dsize, GFP_ATOMIC);
if (!e)
return -ENOMEM;
e->id = d->id;
e->set = set;
INIT_LIST_HEAD(&e->list);
list_set_init_extensions(set, ext, e);
if (n)
list_set_replace(set, e, n);
else if (next)
list_add_tail_rcu(&e->list, &next->list);
else if (prev)
list_add_rcu(&e->list, &prev->list);
else
list_add_tail_rcu(&e->list, &map->members);
set->elements++;
return 0;
}
static int
list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e, *next, *prev = NULL;
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
else if (e->id != d->id) {
prev = e;
continue;
}
if (d->before > 0) {
next = list_next_entry(e, list);
if (list_is_last(&e->list, &map->members) ||
next->id != d->refid)
return -IPSET_ERR_REF_EXIST;
} else if (d->before < 0) {
if (!prev || prev->id != d->refid)
return -IPSET_ERR_REF_EXIST;
}
list_set_del(set, e);
return 0;
}
return d->before != 0 ? -IPSET_ERR_REF_EXIST : -IPSET_ERR_EXIST;
}
static int
list_set_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct list_set *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
struct ip_set *s;
int ret = 0;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_NAME] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
if (e.id == IPSET_INVALID_ID)
return -IPSET_ERR_NAME;
/* "Loop detection" */
if (s->type->features & IPSET_TYPE_NAME) {
ret = -IPSET_ERR_LOOP;
goto finish;
}
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
e.before = f & IPSET_FLAG_BEFORE;
}
if (e.before && !tb[IPSET_ATTR_NAMEREF]) {
ret = -IPSET_ERR_BEFORE;
goto finish;
}
if (tb[IPSET_ATTR_NAMEREF]) {
e.refid = ip_set_get_byname(map->net,
nla_data(tb[IPSET_ATTR_NAMEREF]),
&s);
if (e.refid == IPSET_INVALID_ID) {
ret = -IPSET_ERR_NAMEREF;
goto finish;
}
if (!e.before)
e.before = -1;
}
if (adt != IPSET_TEST && SET_WITH_TIMEOUT(set))
set_cleanup_entries(set);
ret = adtfn(set, &e, &ext, &ext, flags);
finish:
if (e.refid != IPSET_INVALID_ID)
ip_set_put_byindex(map->net, e.refid);
if (adt != IPSET_ADD || ret)
ip_set_put_byindex(map->net, e.id);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
static void
list_set_flush(struct ip_set *set)
{
struct list_set *map = set->data;
struct set_elem *e, *n;
list_for_each_entry_safe(e, n, &map->members, list)
list_set_del(set, e);
set->elements = 0;
set->ext_size = 0;
}
static void
list_set_destroy(struct ip_set *set)
{
struct list_set *map = set->data;
struct set_elem *e, *n;
if (SET_WITH_TIMEOUT(set))
timer_shutdown_sync(&map->gc);
list_for_each_entry_safe(e, n, &map->members, list) {
list_del(&e->list);
ip_set_put_byindex(map->net, e->id);
ip_set_ext_destroy(set, e);
kfree(e);
}
kfree(map);
set->data = NULL;
}
/* Calculate the actual memory size of the set data */
static size_t
list_set_memsize(const struct list_set *map, size_t dsize)
{
struct set_elem *e;
u32 n = 0;
rcu_read_lock();
list_for_each_entry_rcu(e, &map->members, list)
n++;
rcu_read_unlock();
return sizeof(*map) + n * dsize;
}
static int
list_set_head(struct ip_set *set, struct sk_buff *skb)
{
const struct list_set *map = set->data;
struct nlattr *nested;
size_t memsize = list_set_memsize(map, set->dsize) + set->ext_size;
nested = nla_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
goto nla_put_failure;
if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;
nla_nest_end(skb, nested);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int
list_set_list(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
{
const struct list_set *map = set->data;
struct nlattr *atd, *nested;
u32 i = 0, first = cb->args[IPSET_CB_ARG0];
char name[IPSET_MAXNAMELEN];
struct set_elem *e;
int ret = 0;
atd = nla_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
rcu_read_lock();
list_for_each_entry_rcu(e, &map->members, list) {
if (i < first ||
(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))) {
i++;
continue;
}
nested = nla_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
ip_set_name_byindex(map->net, e->id, name);
if (nla_put_string(skb, IPSET_ATTR_NAME, name))
goto nla_put_failure;
if (ip_set_put_extensions(skb, set, e, true))
goto nla_put_failure;
nla_nest_end(skb, nested);
i++;
}
nla_nest_end(skb, atd);
/* Set listing finished */
cb->args[IPSET_CB_ARG0] = 0;
goto out;
nla_put_failure:
nla_nest_cancel(skb, nested);
if (unlikely(i == first)) {
nla_nest_cancel(skb, atd);
cb->args[IPSET_CB_ARG0] = 0;
ret = -EMSGSIZE;
} else {
cb->args[IPSET_CB_ARG0] = i;
nla_nest_end(skb, atd);
}
out:
rcu_read_unlock();
return ret;
}
static bool
list_set_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct list_set *x = a->data;
const struct list_set *y = b->data;
return x->size == y->size &&
a->timeout == b->timeout &&
a->extensions == b->extensions;
}
static const struct ip_set_type_variant set_variant = {
.kadt = list_set_kadt,
.uadt = list_set_uadt,
.adt = {
[IPSET_ADD] = list_set_uadd,
[IPSET_DEL] = list_set_udel,
[IPSET_TEST] = list_set_utest,
},
.destroy = list_set_destroy,
.flush = list_set_flush,
.head = list_set_head,
.list = list_set_list,
.same_set = list_set_same_set,
};
static void
list_set_gc(struct timer_list *t)
{
struct list_set *map = from_timer(map, t, gc);
struct ip_set *set = map->set;
spin_lock_bh(&set->lock);
set_cleanup_entries(set);
spin_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);
}
static void
list_set_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
{
struct list_set *map = set->data;
timer_setup(&map->gc, gc, 0);
mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
}
/* Create list:set type of sets */
static bool
init_list_set(struct net *net, struct ip_set *set, u32 size)
{
struct list_set *map;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return false;
map->size = size;
map->net = net;
map->set = set;
INIT_LIST_HEAD(&map->members);
set->data = map;
return true;
}
static int
list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
{
u32 size = IP_SET_LIST_DEFAULT_SIZE;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_SIZE])
size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
if (size < IP_SET_LIST_MIN_SIZE)
size = IP_SET_LIST_MIN_SIZE;
set->variant = &set_variant;
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
__alignof__(struct set_elem));
if (!init_list_set(net, set, size))
return -ENOMEM;
if (tb[IPSET_ATTR_TIMEOUT]) {
set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
list_set_gc_init(set, list_set_gc);
}
return 0;
}
static struct ip_set_type list_set_type __read_mostly = {
.name = "list:set",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create = list_set_create,
.create_policy = {
[IPSET_ATTR_SIZE] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_NAME] = { .type = NLA_STRING,
.len = IPSET_MAXNAMELEN },
[IPSET_ATTR_NAMEREF] = { .type = NLA_STRING,
.len = IPSET_MAXNAMELEN },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
list_set_init(void)
{
return ip_set_type_register(&list_set_type);
}
static void __exit
list_set_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&list_set_type);
}
module_init(list_set_init);
module_exit(list_set_fini);
| linux-master | net/netfilter/ipset/ip_set_list_set.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:net,port type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 SCTP and UDPLITE support added */
/* 2 Range as input support for IPv4 added */
/* 3 nomatch flag support added */
/* 4 Counters support added */
/* 5 Comments support added */
/* 6 Forceadd support added */
/* 7 skbinfo support added */
#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("hash:net,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,port");
/* Type specific function prefix */
#define HTYPE hash_netport
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS
/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0.
* However this way we have to store internally cidr - 1,
* dancing back and forth.
*/
#define IP_SET_HASH_WITH_NETS_PACKED
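/* Illustration only, not used by this module: a minimal sketch of the
 * cidr packing described above, using hypothetical helper names.
 */
static inline u8 __example_pack_cidr(u8 cidr)
{
return cidr - 1; /* valid input is 1..HOST_MASK, stored as 0..HOST_MASK - 1 */
}
static inline u8 __example_unpack_cidr(u8 packed)
{
return packed + 1; /* restore the real prefix length */
}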
/* IPv4 variant */
/* Member elements */
struct hash_netport4_elem {
__be32 ip;
__be16 port;
u8 proto;
u8 cidr:7;
u8 nomatch:1;
};
/* Common functions */
static bool
hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
const struct hash_netport4_elem *ip2,
u32 *multi)
{
return ip1->ip == ip2->ip &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto &&
ip1->cidr == ip2->cidr;
}
static int
hash_netport4_do_data_match(const struct hash_netport4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_netport4_data_set_flags(struct hash_netport4_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
static void
hash_netport4_data_reset_flags(struct hash_netport4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
elem->cidr = cidr - 1;
}
static bool
hash_netport4_data_list(struct sk_buff *skb,
const struct hash_netport4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_netport4_data_next(struct hash_netport4_elem *next,
const struct hash_netport4_elem *d)
{
next->ip = d->ip;
next->port = d->port;
}
#define MTYPE hash_netport4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netport4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem e = {
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e.ip &= ip_set_netmask(e.cidr + 1);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_netport4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to, p = 0, ip = 0, ip_to = 0, i = 0;
bool with_ports = false;
u8 cidr;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
e.cidr = cidr - 1;
}
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0;
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1));
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
port = port_to = ntohs(e.port);
if (tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port_to < port)
swap(port, port_to);
}
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip_to < ip)
swap(ip, ip_to);
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
}
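/* On a retry resume from the position saved in h->next, otherwise start
 * at the first port of the requested range.
 */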
if (retried) {
ip = ntohl(h->next.ip);
p = ntohs(h->next.port);
} else {
p = port;
}
do {
e.ip = htonl(ip);
ip = ip_set_range_to_cidr(ip, ip_to, &cidr);
e.cidr = cidr - 1;
for (; p <= port_to; p++, i++) {
e.port = htons(p);
if (i > IPSET_MAX_RANGE) {
hash_netport4_data_next(&h->next, &e);
return -ERANGE;
}
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
p = port;
} while (ip++ < ip_to);
return ret;
}
/* IPv6 variant */
struct hash_netport6_elem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
u8 cidr:7;
u8 nomatch:1;
};
/* Common functions */
static bool
hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
const struct hash_netport6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto &&
ip1->cidr == ip2->cidr;
}
static int
hash_netport6_do_data_match(const struct hash_netport6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static void
hash_netport6_data_set_flags(struct hash_netport6_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
static void
hash_netport6_data_reset_flags(struct hash_netport6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static void
hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
elem->cidr = cidr - 1;
}
static bool
hash_netport6_data_list(struct sk_buff *skb,
const struct hash_netport6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_netport6_data_next(struct hash_netport6_elem *next,
const struct hash_netport6_elem *d)
{
next->port = d->port;
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_netport6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netport6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem e = {
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6_netmask(&e.ip, e.cidr + 1);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_netport6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
u8 cidr;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
e.cidr = cidr - 1;
}
ip6_netmask(&e.ip, e.cidr + 1);
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
if (retried)
port = ntohs(h->next.port);
for (; port <= port_to; port++) {
e.port = htons(port);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
return ret;
}
static struct ip_set_type hash_netport_type __read_mostly = {
.name = "hash:net,port",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_netport_init(void)
{
return ip_set_type_register(&hash_netport_type);
}
static void __exit
hash_netport_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_netport_type);
}
module_init(hash_netport_init);
module_exit(hash_netport_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_netport.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:ip,mark type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 Forceadd support */
/* 2 skbinfo support */
#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vytas Dauksa <[email protected]>");
IP_SET_MODULE_DESC("hash:ip,mark", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,mark");
/* Type specific function prefix */
#define HTYPE hash_ipmark
#define IP_SET_HASH_WITH_MARKMASK
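/* The markmask given at create time is applied to skb->mark (and to marks
 * supplied over netlink) before elements are stored or matched.
 */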
/* IPv4 variant */
/* Member elements */
struct hash_ipmark4_elem {
__be32 ip;
__u32 mark;
};
/* Common functions */
static bool
hash_ipmark4_data_equal(const struct hash_ipmark4_elem *ip1,
const struct hash_ipmark4_elem *ip2,
u32 *multi)
{
return ip1->ip == ip2->ip &&
ip1->mark == ip2->mark;
}
static bool
hash_ipmark4_data_list(struct sk_buff *skb,
const struct hash_ipmark4_elem *data)
{
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipmark4_data_next(struct hash_ipmark4_elem *next,
const struct hash_ipmark4_elem *d)
{
next->ip = d->ip;
}
#define MTYPE hash_ipmark4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
hash_ipmark4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_ipmark4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmark4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
e.mark = skb->mark;
e.mark &= h->markmask;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_ipmark4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmark4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip, ip_to = 0, i = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
e.mark &= h->markmask;
if (e.mark == 0 && e.ip == 0)
return -IPSET_ERR_HASH_ELEM;
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
ip_to = ip = ntohl(e.ip);
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip > ip_to) {
if (e.mark == 0 && ip_to == 0)
return -IPSET_ERR_HASH_ELEM;
swap(ip, ip_to);
}
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; ip++, i++) {
e.ip = htonl(ip);
if (i > IPSET_MAX_RANGE) {
hash_ipmark4_data_next(&h->next, &e);
return -ERANGE;
}
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
return ret;
}
/* IPv6 variant */
struct hash_ipmark6_elem {
union nf_inet_addr ip;
__u32 mark;
};
/* Common functions */
static bool
hash_ipmark6_data_equal(const struct hash_ipmark6_elem *ip1,
const struct hash_ipmark6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
ip1->mark == ip2->mark;
}
static bool
hash_ipmark6_data_list(struct sk_buff *skb,
const struct hash_ipmark6_elem *data)
{
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipmark6_data_next(struct hash_ipmark6_elem *next,
const struct hash_ipmark6_elem *d)
{
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_ipmark6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_ipmark6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_ipmark6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmark6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
e.mark = skb->mark;
e.mark &= h->markmask;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_ipmark6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmark6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr != HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
e.mark &= h->markmask;
if (adt == IPSET_TEST) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
return 0;
}
static struct ip_set_type hash_ipmark_type __read_mostly = {
.name = "hash:ip,mark",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_MARK,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipmark_create,
.create_policy = {
[IPSET_ATTR_MARKMASK] = { .type = NLA_U32 },
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_MARK] = { .type = NLA_U32 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_ipmark_init(void)
{
return ip_set_type_register(&hash_ipmark_type);
}
static void __exit
hash_ipmark_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_ipmark_type);
}
module_init(hash_ipmark_init);
module_exit(hash_ipmark_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_ipmark.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:ip,port type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 SCTP and UDPLITE support added */
/* 2 Counters support added */
/* 3 Comments support added */
/* 4 Forceadd support added */
/* 5 skbinfo support added */
/* 6 bucketsize, initval support added */
#define IPSET_TYPE_REV_MAX 7 /* bitmask support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("hash:ip,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port");
/* Type specific function prefix */
#define HTYPE hash_ipport
#define IP_SET_HASH_WITH_NETMASK
#define IP_SET_HASH_WITH_BITMASK
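/* Besides the classic prefix netmask an arbitrary bitmask may be given at
 * create time; both forms are applied to addresses via h->bitmask before
 * storing or matching (the prefix form is converted internally).
 */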
/* IPv4 variant */
/* Member elements */
struct hash_ipport4_elem {
__be32 ip;
__be16 port;
u8 proto;
u8 padding;
};
/* Common functions */
static bool
hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
const struct hash_ipport4_elem *ip2,
u32 *multi)
{
return ip1->ip == ip2->ip &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static bool
hash_ipport4_data_list(struct sk_buff *skb,
const struct hash_ipport4_elem *data)
{
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipport4_data_next(struct hash_ipport4_elem *next,
const struct hash_ipport4_elem *d)
{
next->ip = d->ip;
next->port = d->port;
}
#define MTYPE hash_ipport4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
const struct MTYPE *h = set->data;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e.ip &= h->bitmask.ip;
if (e.ip == 0)
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_ipport4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip, ip_to = 0, p = 0, port, port_to, i = 0;
bool with_ports = false;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
e.ip &= h->bitmask.ip;
if (e.ip == 0)
return -EINVAL;
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0;
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
tb[IPSET_ATTR_PORT_TO])) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
ip_to = ip = ntohl(e.ip);
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip > ip_to)
swap(ip, ip_to);
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
port_to = port = ntohs(e.port);
if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
}
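/* On a retry the outer loop resumes from h->next.ip and, for that address
 * only, the inner loop resumes from h->next.port.
 */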
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; ip++) {
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
for (; p <= port_to; p++, i++) {
e.ip = htonl(ip);
e.port = htons(p);
if (i > IPSET_MAX_RANGE) {
hash_ipport4_data_next(&h->next, &e);
return -ERANGE;
}
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
}
return ret;
}
/* IPv6 variant */
struct hash_ipport6_elem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
u8 padding;
};
/* Common functions */
static bool
hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
const struct hash_ipport6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static bool
hash_ipport6_data_list(struct sk_buff *skb,
const struct hash_ipport6_elem *data)
{
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipport6_data_next(struct hash_ipport6_elem *next,
const struct hash_ipport6_elem *d)
{
next->port = d->port;
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_ipport6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport6_elem e = { .ip = { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
const struct MTYPE *h = set->data;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
nf_inet_addr_mask_inplace(&e.ip, &h->bitmask);
if (ipv6_addr_any(&e.ip.in6))
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_ipport6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport6_elem e = { .ip = { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr != HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
nf_inet_addr_mask_inplace(&e.ip, &h->bitmask);
if (ipv6_addr_any(&e.ip.in6))
return -EINVAL;
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0;
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
if (retried)
port = ntohs(h->next.port);
for (; port <= port_to; port++) {
e.port = htons(port);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
return ret;
}
static struct ip_set_type hash_ipport_type __read_mostly = {
.name = "hash:ip,port",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
[IPSET_ATTR_BITMASK] = { .type = NLA_NESTED },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_ipport_init(void)
{
return ip_set_type_register(&hash_ipport_type);
}
static void __exit
hash_ipport_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_ipport_type);
}
module_init(hash_ipport_init);
module_exit(hash_ipport_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_ipport.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */
/* Kernel module implementing an IP set type: the hash:ip,port,ip type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 1 SCTP and UDPLITE support added */
/* 2 Counters support added */
/* 3 Comments support added */
/* 4 Forceadd support added */
/* 5 skbinfo support added */
#define IPSET_TYPE_REV_MAX 6 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
IP_SET_MODULE_DESC("hash:ip,port,ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port,ip");
/* Type specific function prefix */
#define HTYPE hash_ipportip
/* IPv4 variant */
/* Member elements */
struct hash_ipportip4_elem {
__be32 ip;
__be32 ip2;
__be16 port;
u8 proto;
u8 padding;
};
static bool
hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
const struct hash_ipportip4_elem *ip2,
u32 *multi)
{
return ip1->ip == ip2->ip &&
ip1->ip2 == ip2->ip2 &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static bool
hash_ipportip4_data_list(struct sk_buff *skb,
const struct hash_ipportip4_elem *data)
{
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipportip4_data_next(struct hash_ipportip4_elem *next,
const struct hash_ipportip4_elem *d)
{
next->ip = d->ip;
next->port = d->port;
}
/* Common functions */
#define MTYPE hash_ipportip4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
struct hash_ipportip4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip, ip_to = 0, p = 0, port, port_to, i = 0;
bool with_ports = false;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2);
if (ret)
return ret;
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0;
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
tb[IPSET_ATTR_PORT_TO])) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
ip_to = ip = ntohl(e.ip);
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip > ip_to)
swap(ip, ip_to);
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
port_to = port = ntohs(e.port);
if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
}
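	/* Editor's note: when the request is retried (e.g. after a set
	 * resize), the walk over the IP/port range resumes from the element
	 * recorded in h->next instead of re-adding everything from scratch.
	 */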
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; ip++) {
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
for (; p <= port_to; p++, i++) {
e.ip = htonl(ip);
e.port = htons(p);
if (i > IPSET_MAX_RANGE) {
hash_ipportip4_data_next(&h->next, &e);
return -ERANGE;
}
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
}
return ret;
}
/* IPv6 variant */
struct hash_ipportip6_elem {
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
u8 proto;
u8 padding;
};
/* Common functions */
static bool
hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
const struct hash_ipportip6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static bool
hash_ipportip6_data_list(struct sk_buff *skb,
const struct hash_ipportip6_elem *data)
{
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
goto nla_put_failure;
return false;
nla_put_failure:
return true;
}
static void
hash_ipportip6_data_next(struct hash_ipportip6_elem *next,
const struct hash_ipportip6_elem *d)
{
next->port = d->port;
}
#undef MTYPE
#undef HOST_MASK
#define MTYPE hash_ipportip6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_ipportip6 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (unlikely(tb[IPSET_ATTR_CIDR])) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr != HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
if (ret)
return ret;
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else {
return -IPSET_ERR_MISSING_PROTO;
}
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0;
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
if (retried)
port = ntohs(h->next.port);
for (; port <= port_to; port++) {
e.port = htons(port);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ret = 0;
}
return ret;
}
static struct ip_set_type hash_ipportip_type __read_mostly = {
.name = "hash:ip,port,ip",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
.dimension = IPSET_DIM_THREE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipportip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2] = { .type = NLA_NESTED },
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
.len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_ipportip_init(void)
{
return ip_set_type_register(&hash_ipportip_type);
}
static void __exit
hash_ipportip_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_ipportip_type);
}
module_init(hash_ipportip_init);
module_exit(hash_ipportip_fini);
| linux-master | net/netfilter/ipset/ip_set_hash_ipportip.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_est.c: simple rate estimator for IPVS
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes: Hans Schillstrom <[email protected]>
* Network name space (netns) aware.
 * Global data moved to netns, i.e. struct netns_ipvs
* Affected data: est_list and est_lock.
* estimation_timer() runs with timer per netns.
 * get_stats() does the per cpu summing.
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <net/ip_vs.h>
/*
  This code estimates the rate over a short interval (such as 8
  seconds) for virtual services and real servers. To measure the rate
  over a long interval, it is easy to implement a user-level daemon
  which periodically reads the statistical counters and computes the rate.
We measure rate during the last 8 seconds every 2 seconds:
avgrate = avgrate*(1-W) + rate*W
where W = 2^(-2)
NOTES.
* Average bps is scaled by 2^5, while average pps and cps are scaled by 2^10.
* Netlink users can see 64-bit values but sockopt users are restricted
to 32-bit values for conns, packets, bps, cps and pps.
* A lot of code is taken from net/core/gen_estimator.c
KEY POINTS:
- cpustats counters are updated per-cpu in SoftIRQ context with BH disabled
- kthreads read the cpustats to update the estimators (svcs, dests, total)
- the states of estimators can be read (get stats) or modified (zero stats)
from processes
KTHREADS:
- estimators are added initially to est_temp_list and later kthread 0
distributes them to one or many kthreads for estimation
- kthread contexts are created and attached to array
- the kthread tasks are started when first service is added, before that
the total stats are not estimated
- when configuration (cpulist/nice) is changed, the tasks are restarted
by work (est_reload_work)
- kthread tasks are stopped while the cpulist is empty
- the kthread context holds lists with estimators (chains) which are
processed every 2 seconds
- as estimators can be added dynamically and in bursts, we try to spread
them to multiple chains which are estimated at different time
- on start, kthread 0 enters calculation phase to determine the chain limits
and the limit of estimators per kthread
- est_add_ktid: ktid where to add new ests, can point to empty slot where
we should add kt data
*/
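/* Editor's sketch (not part of the IPVS module): the fixed-point EWMA
 * update used by ip_vs_chain_estimation() in a self-contained form.
 * Rates are kept pre-scaled (pps/cps by 2^10, bps by 2^5) and each
 * sample covers a 2 second window, so the real code shifts by 9 (or 4)
 * instead of 10 (or 5) to fold in the division by 2. The helper name
 * ewma_rate_sketch() and its parameters are invented for illustration.
 */
static inline s64 ewma_rate_sketch(s64 avg, u64 cnt_now, u64 *cnt_last)
{
	/* delta over the last 2 seconds, scaled by 2^10 and divided by 2 */
	u64 rate = (cnt_now - *cnt_last) << 9;

	*cnt_last = cnt_now;
	/* avg = avg * (1 - W) + rate * W, with W = 2^-2 */
	avg += ((s64)rate - avg) >> 2;
	return avg;
}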
static struct lock_class_key __ipvs_est_key;
static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs);
static void ip_vs_est_drain_temp_list(struct netns_ipvs *ipvs);
static void ip_vs_chain_estimation(struct hlist_head *chain)
{
struct ip_vs_estimator *e;
struct ip_vs_cpu_stats *c;
struct ip_vs_stats *s;
u64 rate;
hlist_for_each_entry_rcu(e, chain, list) {
u64 conns, inpkts, outpkts, inbytes, outbytes;
u64 kconns = 0, kinpkts = 0, koutpkts = 0;
u64 kinbytes = 0, koutbytes = 0;
unsigned int start;
int i;
if (kthread_should_stop())
break;
s = container_of(e, struct ip_vs_stats, est);
for_each_possible_cpu(i) {
c = per_cpu_ptr(s->cpustats, i);
do {
start = u64_stats_fetch_begin(&c->syncp);
conns = u64_stats_read(&c->cnt.conns);
inpkts = u64_stats_read(&c->cnt.inpkts);
outpkts = u64_stats_read(&c->cnt.outpkts);
inbytes = u64_stats_read(&c->cnt.inbytes);
outbytes = u64_stats_read(&c->cnt.outbytes);
} while (u64_stats_fetch_retry(&c->syncp, start));
kconns += conns;
kinpkts += inpkts;
koutpkts += outpkts;
kinbytes += inbytes;
koutbytes += outbytes;
}
spin_lock(&s->lock);
s->kstats.conns = kconns;
s->kstats.inpkts = kinpkts;
s->kstats.outpkts = koutpkts;
s->kstats.inbytes = kinbytes;
s->kstats.outbytes = koutbytes;
		/* scaled by 2^10 and divided by the 2 second interval: << 10, >> 1 == << 9 */
rate = (s->kstats.conns - e->last_conns) << 9;
e->last_conns = s->kstats.conns;
e->cps += ((s64)rate - (s64)e->cps) >> 2;
rate = (s->kstats.inpkts - e->last_inpkts) << 9;
e->last_inpkts = s->kstats.inpkts;
e->inpps += ((s64)rate - (s64)e->inpps) >> 2;
rate = (s->kstats.outpkts - e->last_outpkts) << 9;
e->last_outpkts = s->kstats.outpkts;
e->outpps += ((s64)rate - (s64)e->outpps) >> 2;
		/* scaled by 2^5 and divided by the 2 second interval: << 5, >> 1 == << 4 */
rate = (s->kstats.inbytes - e->last_inbytes) << 4;
e->last_inbytes = s->kstats.inbytes;
e->inbps += ((s64)rate - (s64)e->inbps) >> 2;
rate = (s->kstats.outbytes - e->last_outbytes) << 4;
e->last_outbytes = s->kstats.outbytes;
e->outbps += ((s64)rate - (s64)e->outbps) >> 2;
spin_unlock(&s->lock);
}
}
static void ip_vs_tick_estimation(struct ip_vs_est_kt_data *kd, int row)
{
struct ip_vs_est_tick_data *td;
int cid;
rcu_read_lock();
td = rcu_dereference(kd->ticks[row]);
if (!td)
goto out;
for_each_set_bit(cid, td->present, IPVS_EST_TICK_CHAINS) {
if (kthread_should_stop())
break;
ip_vs_chain_estimation(&td->chains[cid]);
cond_resched_rcu();
td = rcu_dereference(kd->ticks[row]);
if (!td)
break;
}
out:
rcu_read_unlock();
}
static int ip_vs_estimation_kthread(void *data)
{
struct ip_vs_est_kt_data *kd = data;
struct netns_ipvs *ipvs = kd->ipvs;
int row = kd->est_row;
unsigned long now;
int id = kd->id;
long gap;
if (id > 0) {
if (!ipvs->est_chain_max)
return 0;
} else {
if (!ipvs->est_chain_max) {
ipvs->est_calc_phase = 1;
/* commit est_calc_phase before reading est_genid */
smp_mb();
}
/* kthread 0 will handle the calc phase */
if (ipvs->est_calc_phase)
ip_vs_est_calc_phase(ipvs);
}
while (1) {
if (!id && !hlist_empty(&ipvs->est_temp_list))
ip_vs_est_drain_temp_list(ipvs);
set_current_state(TASK_IDLE);
if (kthread_should_stop())
break;
/* before estimation, check if we should sleep */
now = jiffies;
gap = kd->est_timer - now;
if (gap > 0) {
if (gap > IPVS_EST_TICK) {
kd->est_timer = now - IPVS_EST_TICK;
gap = IPVS_EST_TICK;
}
schedule_timeout(gap);
} else {
__set_current_state(TASK_RUNNING);
if (gap < -8 * IPVS_EST_TICK)
kd->est_timer = now;
}
if (kd->tick_len[row])
ip_vs_tick_estimation(kd, row);
row++;
if (row >= IPVS_EST_NTICKS)
row = 0;
WRITE_ONCE(kd->est_row, row);
kd->est_timer += IPVS_EST_TICK;
}
__set_current_state(TASK_RUNNING);
return 0;
}
/* Schedule stop/start for kthread tasks */
void ip_vs_est_reload_start(struct netns_ipvs *ipvs)
{
/* Ignore reloads before first service is added */
if (!ipvs->enable)
return;
ip_vs_est_stopped_recalc(ipvs);
/* Bump the kthread configuration genid */
atomic_inc(&ipvs->est_genid);
queue_delayed_work(system_long_wq, &ipvs->est_reload_work, 0);
}
/* Start kthread task with current configuration */
int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
struct ip_vs_est_kt_data *kd)
{
unsigned long now;
int ret = 0;
long gap;
lockdep_assert_held(&ipvs->est_mutex);
if (kd->task)
goto out;
now = jiffies;
gap = kd->est_timer - now;
/* Sync est_timer if task is starting later */
if (abs(gap) > 4 * IPVS_EST_TICK)
kd->est_timer = now;
kd->task = kthread_create(ip_vs_estimation_kthread, kd, "ipvs-e:%d:%d",
ipvs->gen, kd->id);
if (IS_ERR(kd->task)) {
ret = PTR_ERR(kd->task);
kd->task = NULL;
goto out;
}
set_user_nice(kd->task, sysctl_est_nice(ipvs));
set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs));
pr_info("starting estimator thread %d...\n", kd->id);
wake_up_process(kd->task);
out:
return ret;
}
void ip_vs_est_kthread_stop(struct ip_vs_est_kt_data *kd)
{
if (kd->task) {
pr_info("stopping estimator thread %d...\n", kd->id);
kthread_stop(kd->task);
kd->task = NULL;
}
}
/* Apply parameters to kthread */
static void ip_vs_est_set_params(struct netns_ipvs *ipvs,
struct ip_vs_est_kt_data *kd)
{
kd->chain_max = ipvs->est_chain_max;
/* We are using single chain on RCU preemption */
if (IPVS_EST_TICK_CHAINS == 1)
kd->chain_max *= IPVS_EST_CHAIN_FACTOR;
kd->tick_max = IPVS_EST_TICK_CHAINS * kd->chain_max;
kd->est_max_count = IPVS_EST_NTICKS * kd->tick_max;
}
/* Create and start estimation kthread in a free or new array slot */
static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
{
struct ip_vs_est_kt_data *kd = NULL;
int id = ipvs->est_kt_count;
int ret = -ENOMEM;
void *arr = NULL;
int i;
if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
ipvs->enable && ipvs->est_max_threads)
return -EINVAL;
mutex_lock(&ipvs->est_mutex);
for (i = 0; i < id; i++) {
if (!ipvs->est_kt_arr[i])
break;
}
if (i >= id) {
arr = krealloc_array(ipvs->est_kt_arr, id + 1,
sizeof(struct ip_vs_est_kt_data *),
GFP_KERNEL);
if (!arr)
goto out;
ipvs->est_kt_arr = arr;
} else {
id = i;
}
kd = kzalloc(sizeof(*kd), GFP_KERNEL);
if (!kd)
goto out;
kd->ipvs = ipvs;
bitmap_fill(kd->avail, IPVS_EST_NTICKS);
kd->est_timer = jiffies;
kd->id = id;
ip_vs_est_set_params(ipvs, kd);
/* Pre-allocate stats used in calc phase */
if (!id && !kd->calc_stats) {
kd->calc_stats = ip_vs_stats_alloc();
if (!kd->calc_stats)
goto out;
}
/* Start kthread tasks only when services are present */
if (ipvs->enable && !ip_vs_est_stopped(ipvs)) {
ret = ip_vs_est_kthread_start(ipvs, kd);
if (ret < 0)
goto out;
}
if (arr)
ipvs->est_kt_count++;
ipvs->est_kt_arr[id] = kd;
kd = NULL;
/* Use most recent kthread for new ests */
ipvs->est_add_ktid = id;
ret = 0;
out:
mutex_unlock(&ipvs->est_mutex);
if (kd) {
ip_vs_stats_free(kd->calc_stats);
kfree(kd);
}
return ret;
}
/* Select ktid where to add new ests: available, unused or new slot */
static void ip_vs_est_update_ktid(struct netns_ipvs *ipvs)
{
int ktid, best = ipvs->est_kt_count;
struct ip_vs_est_kt_data *kd;
for (ktid = 0; ktid < ipvs->est_kt_count; ktid++) {
kd = ipvs->est_kt_arr[ktid];
if (kd) {
if (kd->est_count < kd->est_max_count) {
best = ktid;
break;
}
} else if (ktid < best) {
best = ktid;
}
}
ipvs->est_add_ktid = best;
}
/* Add estimator to current kthread (est_add_ktid) */
static int ip_vs_enqueue_estimator(struct netns_ipvs *ipvs,
struct ip_vs_estimator *est)
{
struct ip_vs_est_kt_data *kd = NULL;
struct ip_vs_est_tick_data *td;
int ktid, row, crow, cid, ret;
int delay = est->ktrow;
BUILD_BUG_ON_MSG(IPVS_EST_TICK_CHAINS > 127,
"Too many chains for ktcid");
if (ipvs->est_add_ktid < ipvs->est_kt_count) {
kd = ipvs->est_kt_arr[ipvs->est_add_ktid];
if (kd)
goto add_est;
}
ret = ip_vs_est_add_kthread(ipvs);
if (ret < 0)
goto out;
kd = ipvs->est_kt_arr[ipvs->est_add_ktid];
add_est:
ktid = kd->id;
	/* For a small number of estimators prefer to use few ticks,
	 * otherwise try to add into the last estimated row.
	 * est_row and add_row point just after the row we should use.
	 */
if (kd->est_count >= 2 * kd->tick_max || delay < IPVS_EST_NTICKS - 1)
crow = READ_ONCE(kd->est_row);
else
crow = kd->add_row;
crow += delay;
if (crow >= IPVS_EST_NTICKS)
crow -= IPVS_EST_NTICKS;
	/* Does the estimator still carry the initial delay? */
if (delay >= IPVS_EST_NTICKS - 1) {
/* Preserve initial delay or decrease it if no space in tick */
row = crow;
if (crow < IPVS_EST_NTICKS - 1) {
crow++;
row = find_last_bit(kd->avail, crow);
}
if (row >= crow)
row = find_last_bit(kd->avail, IPVS_EST_NTICKS);
} else {
/* Preserve delay or increase it if no space in tick */
row = IPVS_EST_NTICKS;
if (crow > 0)
row = find_next_bit(kd->avail, IPVS_EST_NTICKS, crow);
if (row >= IPVS_EST_NTICKS)
row = find_first_bit(kd->avail, IPVS_EST_NTICKS);
}
td = rcu_dereference_protected(kd->ticks[row], 1);
if (!td) {
td = kzalloc(sizeof(*td), GFP_KERNEL);
if (!td) {
ret = -ENOMEM;
goto out;
}
rcu_assign_pointer(kd->ticks[row], td);
}
cid = find_first_zero_bit(td->full, IPVS_EST_TICK_CHAINS);
kd->est_count++;
kd->tick_len[row]++;
if (!td->chain_len[cid])
__set_bit(cid, td->present);
td->chain_len[cid]++;
est->ktid = ktid;
est->ktrow = row;
est->ktcid = cid;
hlist_add_head_rcu(&est->list, &td->chains[cid]);
if (td->chain_len[cid] >= kd->chain_max) {
__set_bit(cid, td->full);
if (kd->tick_len[row] >= kd->tick_max)
__clear_bit(row, kd->avail);
}
/* Update est_add_ktid to point to first available/empty kt slot */
if (kd->est_count == kd->est_max_count)
ip_vs_est_update_ktid(ipvs);
ret = 0;
out:
return ret;
}
/* Start estimation for stats */
int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est = &stats->est;
int ret;
if (!ipvs->est_max_threads && ipvs->enable)
ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
est->ktid = -1;
est->ktrow = IPVS_EST_NTICKS - 1; /* Initial delay */
	/* We prefer this code to be short: kthread 0 will requeue the
	 * estimator to an available chain. If tasks are disabled, we
	 * will not allocate much memory, just for kt 0.
	 */
ret = 0;
if (!ipvs->est_kt_count || !ipvs->est_kt_arr[0])
ret = ip_vs_est_add_kthread(ipvs);
if (ret >= 0)
hlist_add_head(&est->list, &ipvs->est_temp_list);
else
INIT_HLIST_NODE(&est->list);
return ret;
}
static void ip_vs_est_kthread_destroy(struct ip_vs_est_kt_data *kd)
{
if (kd) {
if (kd->task) {
pr_info("stop unused estimator thread %d...\n", kd->id);
kthread_stop(kd->task);
}
ip_vs_stats_free(kd->calc_stats);
kfree(kd);
}
}
/* Unlink estimator from chain */
void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est = &stats->est;
struct ip_vs_est_tick_data *td;
struct ip_vs_est_kt_data *kd;
int ktid = est->ktid;
int row = est->ktrow;
int cid = est->ktcid;
/* Failed to add to chain ? */
if (hlist_unhashed(&est->list))
return;
/* On return, estimator can be freed, dequeue it now */
/* In est_temp_list ? */
if (ktid < 0) {
hlist_del(&est->list);
goto end_kt0;
}
hlist_del_rcu(&est->list);
kd = ipvs->est_kt_arr[ktid];
td = rcu_dereference_protected(kd->ticks[row], 1);
__clear_bit(cid, td->full);
td->chain_len[cid]--;
if (!td->chain_len[cid])
__clear_bit(cid, td->present);
kd->tick_len[row]--;
__set_bit(row, kd->avail);
if (!kd->tick_len[row]) {
RCU_INIT_POINTER(kd->ticks[row], NULL);
kfree_rcu(td, rcu_head);
}
kd->est_count--;
if (kd->est_count) {
		/* This kt slot may have just become available, prefer it */
if (ktid < ipvs->est_add_ktid)
ipvs->est_add_ktid = ktid;
return;
}
if (ktid > 0) {
mutex_lock(&ipvs->est_mutex);
ip_vs_est_kthread_destroy(kd);
ipvs->est_kt_arr[ktid] = NULL;
if (ktid == ipvs->est_kt_count - 1) {
ipvs->est_kt_count--;
while (ipvs->est_kt_count > 1 &&
!ipvs->est_kt_arr[ipvs->est_kt_count - 1])
ipvs->est_kt_count--;
}
mutex_unlock(&ipvs->est_mutex);
/* This slot is now empty, prefer another available kt slot */
if (ktid == ipvs->est_add_ktid)
ip_vs_est_update_ktid(ipvs);
}
end_kt0:
/* kt 0 is freed after all other kthreads and chains are empty */
if (ipvs->est_kt_count == 1 && hlist_empty(&ipvs->est_temp_list)) {
kd = ipvs->est_kt_arr[0];
if (!kd || !kd->est_count) {
mutex_lock(&ipvs->est_mutex);
if (kd) {
ip_vs_est_kthread_destroy(kd);
ipvs->est_kt_arr[0] = NULL;
}
ipvs->est_kt_count--;
mutex_unlock(&ipvs->est_mutex);
ipvs->est_add_ktid = 0;
}
}
}
/* Register all ests from est_temp_list to kthreads */
static void ip_vs_est_drain_temp_list(struct netns_ipvs *ipvs)
{
struct ip_vs_estimator *est;
while (1) {
int max = 16;
mutex_lock(&__ip_vs_mutex);
while (max-- > 0) {
est = hlist_entry_safe(ipvs->est_temp_list.first,
struct ip_vs_estimator, list);
if (est) {
if (kthread_should_stop())
goto unlock;
hlist_del_init(&est->list);
if (ip_vs_enqueue_estimator(ipvs, est) >= 0)
continue;
est->ktid = -1;
hlist_add_head(&est->list,
&ipvs->est_temp_list);
/* Abort, some entries will not be estimated
* until next attempt
*/
}
goto unlock;
}
mutex_unlock(&__ip_vs_mutex);
cond_resched();
}
unlock:
mutex_unlock(&__ip_vs_mutex);
}
/* Calculate limits for all kthreads */
static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct ip_vs_est_kt_data *kd;
struct hlist_head chain;
struct ip_vs_stats *s;
int cache_factor = 4;
int i, loops, ntest;
s32 min_est = 0;
ktime_t t1, t2;
int max = 8;
int ret = 1;
s64 diff;
u64 val;
INIT_HLIST_HEAD(&chain);
mutex_lock(&__ip_vs_mutex);
kd = ipvs->est_kt_arr[0];
mutex_unlock(&__ip_vs_mutex);
s = kd ? kd->calc_stats : NULL;
if (!s)
goto out;
hlist_add_head(&s->est.list, &chain);
loops = 1;
/* Get best result from many tests */
for (ntest = 0; ntest < 12; ntest++) {
if (!(ntest & 3)) {
/* Wait for cpufreq frequency transition */
wait_event_idle_timeout(wq, kthread_should_stop(),
HZ / 50);
if (!ipvs->enable || kthread_should_stop())
goto stop;
}
local_bh_disable();
rcu_read_lock();
/* Put stats in cache */
ip_vs_chain_estimation(&chain);
t1 = ktime_get();
for (i = loops * cache_factor; i > 0; i--)
ip_vs_chain_estimation(&chain);
t2 = ktime_get();
rcu_read_unlock();
local_bh_enable();
if (!ipvs->enable || kthread_should_stop())
goto stop;
cond_resched();
diff = ktime_to_ns(ktime_sub(t2, t1));
if (diff <= 1 * NSEC_PER_USEC) {
/* Do more loops on low time resolution */
loops *= 2;
continue;
}
if (diff >= NSEC_PER_SEC)
continue;
val = diff;
do_div(val, loops);
if (!min_est || val < min_est) {
min_est = val;
/* goal: 95usec per chain */
val = 95 * NSEC_PER_USEC;
if (val >= min_est) {
do_div(val, min_est);
max = (int)val;
} else {
max = 1;
}
}
}
out:
if (s)
hlist_del_init(&s->est.list);
*chain_max = max;
return ret;
stop:
ret = 0;
goto out;
}
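/* Editor's note (illustrative numbers, not taken from the code above):
 * if the cheapest measured cost of one estimation is min_est = 2000 ns,
 * the 95 usec per-chain goal gives chain_max = 95000 / 2000 = 47, i.e.
 * up to ~47 estimators are packed into a chain so that processing one
 * chain stays close to that budget.
 */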
/* Calculate the parameters and apply them in context of kt #0
* ECP: est_calc_phase
* ECM: est_chain_max
 * ECP	ECM	Insert Chain	enable	Description
 * ---------------------------------------------------------------------------
 * 0	0	est_temp_list	0	create kt #0 context
 * 0	0	est_temp_list	0->1	service added, start kthread #0 task
 * 0->1	0	est_temp_list	1	kt task #0 started, enters calc phase
 * 1	0	est_temp_list	1	kt #0: determine est_chain_max,
 *					stop tasks, move ests to est_temp_list
 *					and free kd for kthreads 1..last
 * 1->0	0->N	kt chains	1	ests can go to kthreads
 * 0	N	kt chains	1	drain est_temp_list, create new kthread
 *					contexts, start tasks, estimate
*/
static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
{
int genid = atomic_read(&ipvs->est_genid);
struct ip_vs_est_tick_data *td;
struct ip_vs_est_kt_data *kd;
struct ip_vs_estimator *est;
struct ip_vs_stats *stats;
int id, row, cid, delay;
bool last, last_td;
int chain_max;
int step;
if (!ip_vs_est_calc_limits(ipvs, &chain_max))
return;
mutex_lock(&__ip_vs_mutex);
/* Stop all other tasks, so that we can immediately move the
* estimators to est_temp_list without RCU grace period
*/
mutex_lock(&ipvs->est_mutex);
for (id = 1; id < ipvs->est_kt_count; id++) {
/* netns clean up started, abort */
if (!ipvs->enable)
goto unlock2;
kd = ipvs->est_kt_arr[id];
if (!kd)
continue;
ip_vs_est_kthread_stop(kd);
}
mutex_unlock(&ipvs->est_mutex);
/* Move all estimators to est_temp_list but carefully,
* all estimators and kthread data can be released while
* we reschedule. Even for kthread 0.
*/
step = 0;
/* Order entries in est_temp_list in ascending delay, so now
* walk delay(desc), id(desc), cid(asc)
*/
delay = IPVS_EST_NTICKS;
next_delay:
delay--;
if (delay < 0)
goto end_dequeue;
last_kt:
/* Destroy contexts backwards */
id = ipvs->est_kt_count;
next_kt:
if (!ipvs->enable || kthread_should_stop())
goto unlock;
id--;
if (id < 0)
goto next_delay;
kd = ipvs->est_kt_arr[id];
if (!kd)
goto next_kt;
/* kt 0 can exist with empty chains */
if (!id && kd->est_count <= 1)
goto next_delay;
row = kd->est_row + delay;
if (row >= IPVS_EST_NTICKS)
row -= IPVS_EST_NTICKS;
td = rcu_dereference_protected(kd->ticks[row], 1);
if (!td)
goto next_kt;
cid = 0;
walk_chain:
if (kthread_should_stop())
goto unlock;
step++;
if (!(step & 63)) {
		/* Give estimators a chance to be added (to est_temp_list)
		 * and deleted (releasing kthread contexts)
		 */
mutex_unlock(&__ip_vs_mutex);
cond_resched();
mutex_lock(&__ip_vs_mutex);
/* Current kt released ? */
if (id >= ipvs->est_kt_count)
goto last_kt;
if (kd != ipvs->est_kt_arr[id])
goto next_kt;
/* Current td released ? */
if (td != rcu_dereference_protected(kd->ticks[row], 1))
goto next_kt;
/* No fatal changes on the current kd and td */
}
est = hlist_entry_safe(td->chains[cid].first, struct ip_vs_estimator,
list);
if (!est) {
cid++;
if (cid >= IPVS_EST_TICK_CHAINS)
goto next_kt;
goto walk_chain;
}
/* We can cheat and increase est_count to protect kt 0 context
* from release but we prefer to keep the last estimator
*/
last = kd->est_count <= 1;
/* Do not free kt #0 data */
if (!id && last)
goto next_delay;
last_td = kd->tick_len[row] <= 1;
stats = container_of(est, struct ip_vs_stats, est);
ip_vs_stop_estimator(ipvs, stats);
/* Tasks are stopped, move without RCU grace period */
est->ktid = -1;
est->ktrow = row - kd->est_row;
if (est->ktrow < 0)
est->ktrow += IPVS_EST_NTICKS;
hlist_add_head(&est->list, &ipvs->est_temp_list);
/* kd freed ? */
if (last)
goto next_kt;
/* td freed ? */
if (last_td)
goto next_kt;
goto walk_chain;
end_dequeue:
/* All estimators removed while calculating ? */
if (!ipvs->est_kt_count)
goto unlock;
kd = ipvs->est_kt_arr[0];
if (!kd)
goto unlock;
kd->add_row = kd->est_row;
ipvs->est_chain_max = chain_max;
ip_vs_est_set_params(ipvs, kd);
pr_info("using max %d ests per chain, %d per kthread\n",
kd->chain_max, kd->est_max_count);
/* Try to keep tot_stats in kt0, enqueue it early */
if (ipvs->tot_stats && !hlist_unhashed(&ipvs->tot_stats->s.est.list) &&
ipvs->tot_stats->s.est.ktid == -1) {
hlist_del(&ipvs->tot_stats->s.est.list);
hlist_add_head(&ipvs->tot_stats->s.est.list,
&ipvs->est_temp_list);
}
mutex_lock(&ipvs->est_mutex);
/* We completed the calc phase, new calc phase not requested */
if (genid == atomic_read(&ipvs->est_genid))
ipvs->est_calc_phase = 0;
unlock2:
mutex_unlock(&ipvs->est_mutex);
unlock:
mutex_unlock(&__ip_vs_mutex);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est = &stats->est;
struct ip_vs_kstats *k = &stats->kstats;
/* reset counters, caller must hold the stats->lock lock */
est->last_inbytes = k->inbytes;
est->last_outbytes = k->outbytes;
est->last_conns = k->conns;
est->last_inpkts = k->inpkts;
est->last_outpkts = k->outpkts;
est->cps = 0;
est->inpps = 0;
est->outpps = 0;
est->inbps = 0;
est->outbps = 0;
}
/* Get decoded rates */
void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats)
{
struct ip_vs_estimator *e = &stats->est;
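	/* Editor's note: cps/pps values are stored scaled by 2^10 and bps
	 * by 2^5; adding 0x1FF (resp. 0xF) before the shift rounds the
	 * descaled value instead of always truncating it.
	 */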
dst->cps = (e->cps + 0x1FF) >> 10;
dst->inpps = (e->inpps + 0x1FF) >> 10;
dst->outpps = (e->outpps + 0x1FF) >> 10;
dst->inbps = (e->inbps + 0xF) >> 5;
dst->outbps = (e->outbps + 0xF) >> 5;
}
int __net_init ip_vs_estimator_net_init(struct netns_ipvs *ipvs)
{
INIT_HLIST_HEAD(&ipvs->est_temp_list);
ipvs->est_kt_arr = NULL;
ipvs->est_max_threads = 0;
ipvs->est_calc_phase = 0;
ipvs->est_chain_max = 0;
ipvs->est_kt_count = 0;
ipvs->est_add_ktid = 0;
atomic_set(&ipvs->est_genid, 0);
atomic_set(&ipvs->est_genid_done, 0);
__mutex_init(&ipvs->est_mutex, "ipvs->est_mutex", &__ipvs_est_key);
return 0;
}
void __net_exit ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs)
{
int i;
for (i = 0; i < ipvs->est_kt_count; i++)
ip_vs_est_kthread_destroy(ipvs->est_kt_arr[i]);
kfree(ipvs->est_kt_arr);
mutex_destroy(&ipvs->est_mutex);
}
| linux-master | net/netfilter/ipvs/ip_vs_est.c |
// SPDX-License-Identifier: GPL-2.0-only
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/nf_conntrack_sip.h>
#ifdef CONFIG_IP_VS_DEBUG
static const char *ip_vs_dbg_callid(char *buf, size_t buf_len,
const char *callid, size_t callid_len,
int *idx)
{
size_t max_len = 64;
size_t len = min3(max_len, callid_len, buf_len - *idx - 1);
memcpy(buf + *idx, callid, len);
buf[*idx+len] = '\0';
*idx += len + 1;
return buf + *idx - len;
}
#define IP_VS_DEBUG_CALLID(callid, len) \
ip_vs_dbg_callid(ip_vs_dbg_buf, sizeof(ip_vs_dbg_buf), \
callid, len, &ip_vs_dbg_idx)
#endif
static int get_callid(const char *dptr, unsigned int dataoff,
unsigned int datalen,
unsigned int *matchoff, unsigned int *matchlen)
{
/* Find callid */
while (1) {
int ret = ct_sip_get_header(NULL, dptr, dataoff, datalen,
SIP_HDR_CALL_ID, matchoff,
matchlen);
if (ret > 0)
break;
if (!ret)
return -EINVAL;
dataoff += *matchoff;
}
/* Too large is useless */
if (*matchlen > IP_VS_PEDATA_MAXLEN)
return -EINVAL;
/* SIP headers are always followed by a line terminator */
if (*matchoff + *matchlen == datalen)
return -EINVAL;
/* RFC 2543 allows lines to be terminated with CR, LF or CRLF,
* RFC 3261 allows only CRLF, we support both. */
if (*(dptr + *matchoff + *matchlen) != '\r' &&
*(dptr + *matchoff + *matchlen) != '\n')
return -EINVAL;
IP_VS_DBG_BUF(9, "SIP callid %s (%d bytes)\n",
IP_VS_DEBUG_CALLID(dptr + *matchoff, *matchlen),
*matchlen);
return 0;
}
static int
ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
{
struct ip_vs_iphdr iph;
unsigned int dataoff, datalen, matchoff, matchlen;
const char *dptr;
int retc;
retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
/* Only useful with UDP */
if (!retc || iph.protocol != IPPROTO_UDP)
return -EINVAL;
/* todo: IPv6 fragments:
* I think this only should be done for the first fragment. /HS
*/
dataoff = iph.len + sizeof(struct udphdr);
if (dataoff >= skb->len)
return -EINVAL;
retc = skb_linearize(skb);
if (retc < 0)
return retc;
dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
return -EINVAL;
/* N.B: pe_data is only set on success,
* this allows fallback to the default persistence logic on failure
*/
p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC);
if (!p->pe_data)
return -ENOMEM;
p->pe_data_len = matchlen;
return 0;
}
static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,
struct ip_vs_conn *ct)
{
bool ret = false;
if (ct->af == p->af &&
ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) &&
/* protocol should only be IPPROTO_IP if
* d_addr is a fwmark */
ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
p->vaddr, &ct->vaddr) &&
ct->vport == p->vport &&
ct->flags & IP_VS_CONN_F_TEMPLATE &&
ct->protocol == p->protocol &&
ct->pe_data && ct->pe_data_len == p->pe_data_len &&
!memcmp(ct->pe_data, p->pe_data, p->pe_data_len))
ret = true;
IP_VS_DBG_BUF(9, "SIP template match %s %s->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DEBUG_CALLID(p->pe_data, p->pe_data_len),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
ret ? "hit" : "not hit");
return ret;
}
static u32 ip_vs_sip_hashkey_raw(const struct ip_vs_conn_param *p,
u32 initval, bool inverse)
{
return jhash(p->pe_data, p->pe_data_len, initval);
}
static int ip_vs_sip_show_pe_data(const struct ip_vs_conn *cp, char *buf)
{
memcpy(buf, cp->pe_data, cp->pe_data_len);
return cp->pe_data_len;
}
static struct ip_vs_conn *
ip_vs_sip_conn_out(struct ip_vs_service *svc,
struct ip_vs_dest *dest,
struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
__be16 dport,
__be16 cport)
{
if (likely(iph->protocol == IPPROTO_UDP))
return ip_vs_new_conn_out(svc, dest, skb, iph, dport, cport);
/* currently no need to handle other than UDP */
return NULL;
}
static struct ip_vs_pe ip_vs_sip_pe =
{
.name = "sip",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_sip_pe.n_list),
.fill_param = ip_vs_sip_fill_param,
.ct_match = ip_vs_sip_ct_match,
.hashkey_raw = ip_vs_sip_hashkey_raw,
.show_pe_data = ip_vs_sip_show_pe_data,
.conn_out = ip_vs_sip_conn_out,
};
static int __init ip_vs_sip_init(void)
{
return register_ip_vs_pe(&ip_vs_sip_pe);
}
static void __exit ip_vs_sip_cleanup(void)
{
unregister_ip_vs_pe(&ip_vs_sip_pe);
synchronize_rcu();
}
module_init(ip_vs_sip_init);
module_exit(ip_vs_sip_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_pe_sip.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Round-Robin Scheduling module
*
* Authors: Wensong Zhang <[email protected]>
* Peter Kese <[email protected]>
*
* Fixes/Changes:
* Wensong Zhang : changed the ip_vs_rr_schedule to return dest
* Julian Anastasov : fixed the NULL pointer access bug in debugging
 * Wensong Zhang : changed some cosmetic things for debugging
* Wensong Zhang : changed for the d-linked destination list
* Wensong Zhang : added the ip_vs_rr_update_svc
* Wensong Zhang : added any dest with weight=0 is quiesced
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
static int ip_vs_rr_init_svc(struct ip_vs_service *svc)
{
svc->sched_data = &svc->destinations;
return 0;
}
static int ip_vs_rr_del_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest)
{
struct list_head *p;
spin_lock_bh(&svc->sched_lock);
p = (struct list_head *) svc->sched_data;
/* dest is already unlinked, so p->prev is not valid but
* p->next is valid, use it to reach previous entry.
*/
if (p == &dest->n_list)
svc->sched_data = p->next->prev;
spin_unlock_bh(&svc->sched_lock);
return 0;
}
/*
* Round-Robin Scheduling
*/
static struct ip_vs_dest *
ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct list_head *p;
struct ip_vs_dest *dest, *last;
int pass = 0;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
spin_lock_bh(&svc->sched_lock);
p = (struct list_head *) svc->sched_data;
last = dest = list_entry(p, struct ip_vs_dest, n_list);
do {
list_for_each_entry_continue_rcu(dest,
&svc->destinations,
n_list) {
if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
atomic_read(&dest->weight) > 0)
/* HIT */
goto out;
if (dest == last)
goto stop;
}
pass++;
/* Previous dest could be unlinked, do not loop forever.
* If we stay at head there is no need for 2nd pass.
*/
} while (pass < 2 && p != &svc->destinations);
stop:
spin_unlock_bh(&svc->sched_lock);
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
out:
svc->sched_data = &dest->n_list;
spin_unlock_bh(&svc->sched_lock);
IP_VS_DBG_BUF(6, "RR: server %s:%u "
"activeconns %d refcnt %d weight %d\n",
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
atomic_read(&dest->activeconns),
refcount_read(&dest->refcnt), atomic_read(&dest->weight));
return dest;
}
static struct ip_vs_scheduler ip_vs_rr_scheduler = {
.name = "rr", /* name */
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
.init_service = ip_vs_rr_init_svc,
.add_dest = NULL,
.del_dest = ip_vs_rr_del_dest,
.schedule = ip_vs_rr_schedule,
};
static int __init ip_vs_rr_init(void)
{
return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
}
static void __exit ip_vs_rr_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_rr_scheduler);
synchronize_rcu();
}
module_init(ip_vs_rr_init);
module_exit(ip_vs_rr_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_rr.c |
// SPDX-License-Identifier: GPL-2.0-only
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/string.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <net/ip_vs.h>
/* IPVS pe list */
static LIST_HEAD(ip_vs_pe);
/* semaphore for IPVS PEs. */
static DEFINE_MUTEX(ip_vs_pe_mutex);
/* Get pe in the pe list by name */
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
{
struct ip_vs_pe *pe;
IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__,
pe_name);
rcu_read_lock();
list_for_each_entry_rcu(pe, &ip_vs_pe, n_list) {
/* Test and get the modules atomically */
if (pe->module &&
!try_module_get(pe->module)) {
/* This pe is just deleted */
continue;
}
		if (strcmp(pe_name, pe->name) == 0) {
/* HIT */
rcu_read_unlock();
return pe;
}
module_put(pe->module);
}
rcu_read_unlock();
return NULL;
}
/* Lookup pe and try to load it if it doesn't exist */
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name)
{
struct ip_vs_pe *pe;
/* Search for the pe by name */
pe = __ip_vs_pe_getbyname(name);
/* If pe not found, load the module and search again */
if (!pe) {
request_module("ip_vs_pe_%s", name);
pe = __ip_vs_pe_getbyname(name);
}
return pe;
}
/* Register a pe in the pe list */
int register_ip_vs_pe(struct ip_vs_pe *pe)
{
struct ip_vs_pe *tmp;
/* increase the module use count */
if (!ip_vs_use_count_inc())
return -ENOENT;
mutex_lock(&ip_vs_pe_mutex);
/* Make sure that the pe with this name doesn't exist
* in the pe list.
*/
list_for_each_entry(tmp, &ip_vs_pe, n_list) {
if (strcmp(tmp->name, pe->name) == 0) {
mutex_unlock(&ip_vs_pe_mutex);
ip_vs_use_count_dec();
pr_err("%s(): [%s] pe already existed "
"in the system\n", __func__, pe->name);
return -EINVAL;
}
}
/* Add it into the d-linked pe list */
list_add_rcu(&pe->n_list, &ip_vs_pe);
mutex_unlock(&ip_vs_pe_mutex);
pr_info("[%s] pe registered.\n", pe->name);
return 0;
}
EXPORT_SYMBOL_GPL(register_ip_vs_pe);
/* Unregister a pe from the pe list */
int unregister_ip_vs_pe(struct ip_vs_pe *pe)
{
mutex_lock(&ip_vs_pe_mutex);
/* Remove it from the d-linked pe list */
list_del_rcu(&pe->n_list);
mutex_unlock(&ip_vs_pe_mutex);
/* decrease the module use count */
ip_vs_use_count_dec();
pr_info("[%s] pe unregistered.\n", pe->name);
return 0;
}
EXPORT_SYMBOL_GPL(unregister_ip_vs_pe);
| linux-master | net/netfilter/ipvs/ip_vs_pe.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/sctp/checksum.h>
#include <net/ip_vs.h>
static int
sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
static int
sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
struct ip_vs_iphdr *iph)
{
struct ip_vs_service *svc;
struct sctp_chunkhdr _schunkh, *sch;
struct sctphdr *sh, _sctph;
__be16 _ports[2], *ports = NULL;
if (likely(!ip_vs_iph_icmp(iph))) {
sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
if (sh) {
sch = skb_header_pointer(skb, iph->len + sizeof(_sctph),
sizeof(_schunkh), &_schunkh);
if (sch) {
if (sch->type == SCTP_CID_ABORT ||
!(sysctl_sloppy_sctp(ipvs) ||
sch->type == SCTP_CID_INIT))
return 1;
ports = &sh->source;
}
}
} else {
ports = skb_header_pointer(
skb, iph->len, sizeof(_ports), &_ports);
}
if (!ports) {
*verdict = NF_DROP;
return 0;
}
if (likely(!ip_vs_iph_inverse(iph)))
svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
&iph->daddr, ports[1]);
else
svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
&iph->saddr, ports[0]);
if (svc) {
int ignored;
if (ip_vs_todrop(ipvs)) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
*/
*verdict = NF_DROP;
return 0;
}
/*
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
if (!*cpp && ignored <= 0) {
if (!ignored)
*verdict = ip_vs_leave(svc, skb, pd, iph);
else
*verdict = NF_DROP;
return 0;
}
}
/* NF_ACCEPT */
return 1;
}
static void sctp_nat_csum(struct sk_buff *skb, struct sctphdr *sctph,
unsigned int sctphoff)
{
sctph->checksum = sctp_compute_cksum(skb, sctphoff);
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static int
sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
struct sctphdr *sctph;
unsigned int sctphoff = iph->len;
bool payload_csum = false;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
return 1;
#endif
/* csum_check requires unshared skb */
if (skb_ensure_writable(skb, sctphoff + sizeof(*sctph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (!sctp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
ret = ip_vs_app_pkt_out(cp, skb, iph);
if (ret == 0)
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 2)
payload_csum = true;
}
sctph = (void *) skb_network_header(skb) + sctphoff;
/* Only update csum if we really have to */
if (sctph->source != cp->vport || payload_csum ||
skb->ip_summed == CHECKSUM_PARTIAL) {
sctph->source = cp->vport;
sctp_nat_csum(skb, sctph, sctphoff);
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return 1;
}
static int
sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
struct sctphdr *sctph;
unsigned int sctphoff = iph->len;
bool payload_csum = false;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
return 1;
#endif
/* csum_check requires unshared skb */
if (skb_ensure_writable(skb, sctphoff + sizeof(*sctph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (!sctp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
ret = ip_vs_app_pkt_in(cp, skb, iph);
if (ret == 0)
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 2)
payload_csum = true;
}
sctph = (void *) skb_network_header(skb) + sctphoff;
/* Only update csum if we really have to */
if (sctph->dest != cp->dport || payload_csum ||
(skb->ip_summed == CHECKSUM_PARTIAL &&
!(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
sctph->dest = cp->dport;
sctp_nat_csum(skb, sctph, sctphoff);
} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return 1;
}
static int
sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
unsigned int sctphoff;
struct sctphdr *sh;
__le32 cmp, val;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
sctphoff = sizeof(struct ipv6hdr);
else
#endif
sctphoff = ip_hdrlen(skb);
sh = (struct sctphdr *)(skb->data + sctphoff);
cmp = sh->checksum;
val = sctp_compute_cksum(skb, sctphoff);
if (val != cmp) {
/* CRC failure, dump it. */
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
return 1;
}
enum ipvs_sctp_event_t {
IP_VS_SCTP_DATA = 0, /* DATA, SACK, HEARTBEATs */
IP_VS_SCTP_INIT,
IP_VS_SCTP_INIT_ACK,
IP_VS_SCTP_COOKIE_ECHO,
IP_VS_SCTP_COOKIE_ACK,
IP_VS_SCTP_SHUTDOWN,
IP_VS_SCTP_SHUTDOWN_ACK,
IP_VS_SCTP_SHUTDOWN_COMPLETE,
IP_VS_SCTP_ERROR,
IP_VS_SCTP_ABORT,
IP_VS_SCTP_EVENT_LAST
};
/* RFC 2960, 3.2 Chunk Field Descriptions */
static __u8 sctp_events[] = {
[SCTP_CID_DATA] = IP_VS_SCTP_DATA,
[SCTP_CID_INIT] = IP_VS_SCTP_INIT,
[SCTP_CID_INIT_ACK] = IP_VS_SCTP_INIT_ACK,
[SCTP_CID_SACK] = IP_VS_SCTP_DATA,
[SCTP_CID_HEARTBEAT] = IP_VS_SCTP_DATA,
[SCTP_CID_HEARTBEAT_ACK] = IP_VS_SCTP_DATA,
[SCTP_CID_ABORT] = IP_VS_SCTP_ABORT,
[SCTP_CID_SHUTDOWN] = IP_VS_SCTP_SHUTDOWN,
[SCTP_CID_SHUTDOWN_ACK] = IP_VS_SCTP_SHUTDOWN_ACK,
[SCTP_CID_ERROR] = IP_VS_SCTP_ERROR,
[SCTP_CID_COOKIE_ECHO] = IP_VS_SCTP_COOKIE_ECHO,
[SCTP_CID_COOKIE_ACK] = IP_VS_SCTP_COOKIE_ACK,
[SCTP_CID_ECN_ECNE] = IP_VS_SCTP_DATA,
[SCTP_CID_ECN_CWR] = IP_VS_SCTP_DATA,
[SCTP_CID_SHUTDOWN_COMPLETE] = IP_VS_SCTP_SHUTDOWN_COMPLETE,
};
/* SCTP States:
* See RFC 2960, 4. SCTP Association State Diagram
*
* New states (not in diagram):
* - INIT1 state: use shorter timeout for dropped INIT packets
* - REJECTED state: use shorter timeout if INIT is rejected with ABORT
* - INIT, COOKIE_SENT, COOKIE_REPLIED, COOKIE states: for better debugging
*
* The states are as seen in real server. In the diagram, INIT1, INIT,
* COOKIE_SENT and COOKIE_REPLIED processing happens in CLOSED state.
*
* States as per packets from client (C) and server (S):
*
* Setup of client connection:
* IP_VS_SCTP_S_INIT1: First C:INIT sent, wait for S:INIT-ACK
* IP_VS_SCTP_S_INIT: Next C:INIT sent, wait for S:INIT-ACK
* IP_VS_SCTP_S_COOKIE_SENT: S:INIT-ACK sent, wait for C:COOKIE-ECHO
* IP_VS_SCTP_S_COOKIE_REPLIED: C:COOKIE-ECHO sent, wait for S:COOKIE-ACK
*
* Setup of server connection:
* IP_VS_SCTP_S_COOKIE_WAIT: S:INIT sent, wait for C:INIT-ACK
* IP_VS_SCTP_S_COOKIE: C:INIT-ACK sent, wait for S:COOKIE-ECHO
* IP_VS_SCTP_S_COOKIE_ECHOED: S:COOKIE-ECHO sent, wait for C:COOKIE-ACK
*/
#define sNO IP_VS_SCTP_S_NONE
#define sI1 IP_VS_SCTP_S_INIT1
#define sIN IP_VS_SCTP_S_INIT
#define sCS IP_VS_SCTP_S_COOKIE_SENT
#define sCR IP_VS_SCTP_S_COOKIE_REPLIED
#define sCW IP_VS_SCTP_S_COOKIE_WAIT
#define sCO IP_VS_SCTP_S_COOKIE
#define sCE IP_VS_SCTP_S_COOKIE_ECHOED
#define sES IP_VS_SCTP_S_ESTABLISHED
#define sSS IP_VS_SCTP_S_SHUTDOWN_SENT
#define sSR IP_VS_SCTP_S_SHUTDOWN_RECEIVED
#define sSA IP_VS_SCTP_S_SHUTDOWN_ACK_SENT
#define sRJ IP_VS_SCTP_S_REJECTED
#define sCL IP_VS_SCTP_S_CLOSED
static const __u8 sctp_states
[IP_VS_DIR_LAST][IP_VS_SCTP_EVENT_LAST][IP_VS_SCTP_S_LAST] = {
{ /* INPUT */
/* sNO, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL*/
/* d */{sES, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* i */{sI1, sIN, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sIN, sIN},
/* i_a */{sCW, sCW, sCW, sCS, sCR, sCO, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* c_e */{sCR, sIN, sIN, sCR, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* c_a */{sES, sI1, sIN, sCS, sCR, sCW, sCO, sES, sES, sSS, sSR, sSA, sRJ, sCL},
/* s */{sSR, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sSR, sSS, sSR, sSA, sRJ, sCL},
/* s_a */{sCL, sIN, sIN, sCS, sCR, sCW, sCO, sCE, sES, sCL, sSR, sCL, sRJ, sCL},
/* s_c */{sCL, sCL, sCL, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sCL, sRJ, sCL},
/* err */{sCL, sI1, sIN, sCS, sCR, sCW, sCO, sCL, sES, sSS, sSR, sSA, sRJ, sCL},
/* ab */{sCL, sCL, sCL, sCL, sCL, sRJ, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
},
{ /* OUTPUT */
/* sNO, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL*/
/* d */{sES, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* i */{sCW, sCW, sCW, sCW, sCW, sCW, sCW, sCW, sES, sCW, sCW, sCW, sCW, sCW},
/* i_a */{sCS, sCS, sCS, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* c_e */{sCE, sCE, sCE, sCE, sCE, sCE, sCE, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* c_a */{sES, sES, sES, sES, sES, sES, sES, sES, sES, sSS, sSR, sSA, sRJ, sCL},
/* s */{sSS, sSS, sSS, sSS, sSS, sSS, sSS, sSS, sSS, sSS, sSR, sSA, sRJ, sCL},
/* s_a */{sSA, sSA, sSA, sSA, sSA, sCW, sCO, sCE, sES, sSA, sSA, sSA, sRJ, sCL},
/* s_c */{sCL, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* err */{sCL, sCL, sCL, sCL, sCL, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* ab */{sCL, sRJ, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
},
{ /* INPUT-ONLY */
/* sNO, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL*/
/* d */{sES, sI1, sIN, sCS, sCR, sES, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* i */{sI1, sIN, sIN, sIN, sIN, sIN, sCO, sCE, sES, sSS, sSR, sSA, sIN, sIN},
/* i_a */{sCE, sCE, sCE, sCE, sCE, sCE, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* c_e */{sES, sES, sES, sES, sES, sES, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* c_a */{sES, sI1, sIN, sES, sES, sCW, sES, sES, sES, sSS, sSR, sSA, sRJ, sCL},
/* s */{sSR, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sSR, sSS, sSR, sSA, sRJ, sCL},
/* s_a */{sCL, sIN, sIN, sCS, sCR, sCW, sCO, sCE, sCL, sCL, sSR, sCL, sRJ, sCL},
/* s_c */{sCL, sCL, sCL, sCL, sCL, sCW, sCO, sCE, sES, sSS, sCL, sCL, sRJ, sCL},
/* err */{sCL, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL},
/* ab */{sCL, sCL, sCL, sCL, sCL, sRJ, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
},
};
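/* Editor's worked example of the table above: an INIT chunk seen on a
 * connection in state sNO maps to sI1 in the INPUT direction
 * (sctp_states[IP_VS_DIR_INPUT][IP_VS_SCTP_INIT][IP_VS_SCTP_S_NONE])
 * and to sCW (COOKIE_WAIT) in the OUTPUT direction, matching the
 * "i" rows of the INPUT and OUTPUT blocks.
 */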
#define IP_VS_SCTP_MAX_RTO ((60 + 1) * HZ)
/* Timeout table[state] */
static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
[IP_VS_SCTP_S_NONE] = 2 * HZ,
[IP_VS_SCTP_S_INIT1] = (0 + 3 + 1) * HZ,
[IP_VS_SCTP_S_INIT] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_COOKIE_SENT] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_COOKIE_REPLIED] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_COOKIE_WAIT] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_COOKIE] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_COOKIE_ECHOED] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_ESTABLISHED] = 15 * 60 * HZ,
[IP_VS_SCTP_S_SHUTDOWN_SENT] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_SHUTDOWN_RECEIVED] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_SHUTDOWN_ACK_SENT] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_REJECTED] = (0 + 3 + 1) * HZ,
[IP_VS_SCTP_S_CLOSED] = IP_VS_SCTP_MAX_RTO,
[IP_VS_SCTP_S_LAST] = 2 * HZ,
};
static const char *sctp_state_name_table[IP_VS_SCTP_S_LAST + 1] = {
[IP_VS_SCTP_S_NONE] = "NONE",
[IP_VS_SCTP_S_INIT1] = "INIT1",
[IP_VS_SCTP_S_INIT] = "INIT",
[IP_VS_SCTP_S_COOKIE_SENT] = "C-SENT",
[IP_VS_SCTP_S_COOKIE_REPLIED] = "C-REPLIED",
[IP_VS_SCTP_S_COOKIE_WAIT] = "C-WAIT",
[IP_VS_SCTP_S_COOKIE] = "COOKIE",
[IP_VS_SCTP_S_COOKIE_ECHOED] = "C-ECHOED",
[IP_VS_SCTP_S_ESTABLISHED] = "ESTABLISHED",
[IP_VS_SCTP_S_SHUTDOWN_SENT] = "S-SENT",
[IP_VS_SCTP_S_SHUTDOWN_RECEIVED] = "S-RECEIVED",
[IP_VS_SCTP_S_SHUTDOWN_ACK_SENT] = "S-ACK-SENT",
[IP_VS_SCTP_S_REJECTED] = "REJECTED",
[IP_VS_SCTP_S_CLOSED] = "CLOSED",
[IP_VS_SCTP_S_LAST] = "BUG!",
};
static const char *sctp_state_name(int state)
{
if (state >= IP_VS_SCTP_S_LAST)
return "ERR!";
if (sctp_state_name_table[state])
return sctp_state_name_table[state];
return "?";
}
static inline void
set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, const struct sk_buff *skb)
{
struct sctp_chunkhdr _sctpch, *sch;
unsigned char chunk_type;
int event, next_state;
int ihl, cofs;
#ifdef CONFIG_IP_VS_IPV6
ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
#else
ihl = ip_hdrlen(skb);
#endif
cofs = ihl + sizeof(struct sctphdr);
sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
if (sch == NULL)
return;
chunk_type = sch->type;
/*
* Section 3: Multiple chunks can be bundled into one SCTP packet
* up to the MTU size, except for the INIT, INIT ACK, and
* SHUTDOWN COMPLETE chunks. These chunks MUST NOT be bundled with
* any other chunk in a packet.
*
* Section 3.3.7: DATA chunks MUST NOT be bundled with ABORT. Control
* chunks (except for INIT, INIT ACK, and SHUTDOWN COMPLETE) MAY be
* bundled with an ABORT, but they MUST be placed before the ABORT
* in the SCTP packet or they will be ignored by the receiver.
*/
if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
(sch->type == SCTP_CID_COOKIE_ACK)) {
int clen = ntohs(sch->length);
if (clen >= sizeof(_sctpch)) {
sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
sizeof(_sctpch), &_sctpch);
if (sch && sch->type == SCTP_CID_ABORT)
chunk_type = sch->type;
}
}
event = (chunk_type < sizeof(sctp_events)) ?
sctp_events[chunk_type] : IP_VS_SCTP_DATA;
/* Update direction to INPUT_ONLY if necessary
* or delete NO_OUTPUT flag if output packet detected
*/
if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
if (direction == IP_VS_DIR_OUTPUT)
cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
else
direction = IP_VS_DIR_INPUT_ONLY;
}
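/* Look up the next state in the table indexed by direction,
* chunk event and current connection state.
*/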
next_state = sctp_states[direction][event][cp->state];
if (next_state != cp->state) {
struct ip_vs_dest *dest = cp->dest;
IP_VS_DBG_BUF(8, "%s %s %s:%d->"
"%s:%d state: %s->%s conn->refcnt:%d\n",
pd->pp->name,
((direction == IP_VS_DIR_OUTPUT) ?
"output " : "input "),
IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
ntohs(cp->dport),
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
sctp_state_name(cp->state),
sctp_state_name(next_state),
refcount_read(&cp->refcnt));
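/* Only ESTABLISHED counts as active for SCTP; keep the
* destination's active/inactive counters in sync when the
* connection enters or leaves that state.
*/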
if (dest) {
if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
(next_state != IP_VS_SCTP_S_ESTABLISHED)) {
atomic_dec(&dest->activeconns);
atomic_inc(&dest->inactconns);
cp->flags |= IP_VS_CONN_F_INACTIVE;
} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
(next_state == IP_VS_SCTP_S_ESTABLISHED)) {
atomic_inc(&dest->activeconns);
atomic_dec(&dest->inactconns);
cp->flags &= ~IP_VS_CONN_F_INACTIVE;
}
}
if (next_state == IP_VS_SCTP_S_ESTABLISHED)
ip_vs_control_assure_ct(cp);
}
if (likely(pd))
cp->timeout = pd->timeout_table[cp->state = next_state];
else /* What to do ? */
cp->timeout = sctp_timeouts[cp->state = next_state];
}
static void
sctp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb, struct ip_vs_proto_data *pd)
{
spin_lock_bh(&cp->lock);
set_sctp_state(pd, cp, direction, skb);
spin_unlock_bh(&cp->lock);
}
static inline __u16 sctp_app_hashkey(__be16 port)
{
return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
& SCTP_APP_TAB_MASK;
}
static int sctp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_SCTP);
hash = sctp_app_hashkey(port);
list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
list_add_rcu(&inc->p_list, &ipvs->sctp_apps[hash]);
atomic_inc(&pd->appcnt);
out:
return ret;
}
static void sctp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_SCTP);
atomic_dec(&pd->appcnt);
list_del_rcu(&inc->p_list);
}
static int sctp_app_conn_bind(struct ip_vs_conn *cp)
{
struct netns_ipvs *ipvs = cp->ipvs;
int hash;
struct ip_vs_app *inc;
int result = 0;
/* Default binding: bind app only for NAT */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
return 0;
/* Lookup application incarnations and bind the right one */
hash = sctp_app_hashkey(cp->vport);
list_for_each_entry_rcu(inc, &ipvs->sctp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
__func__,
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
ntohs(cp->vport),
inc->name, ntohs(inc->port));
cp->app = inc;
if (inc->init_conn)
result = inc->init_conn(inc, cp);
break;
}
}
return result;
}
/* ---------------------------------------------
* timeouts are netns related now.
* ---------------------------------------------
*/
static int __ip_vs_sctp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
sizeof(sctp_timeouts));
if (!pd->timeout_table)
return -ENOMEM;
return 0;
}
static void __ip_vs_sctp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
kfree(pd->timeout_table);
}
struct ip_vs_protocol ip_vs_protocol_sctp = {
.name = "SCTP",
.protocol = IPPROTO_SCTP,
.num_states = IP_VS_SCTP_S_LAST,
.dont_defrag = 0,
.init = NULL,
.exit = NULL,
.init_netns = __ip_vs_sctp_init,
.exit_netns = __ip_vs_sctp_exit,
.register_app = sctp_register_app,
.unregister_app = sctp_unregister_app,
.conn_schedule = sctp_conn_schedule,
.conn_in_get = ip_vs_conn_in_get_proto,
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = sctp_snat_handler,
.dnat_handler = sctp_dnat_handler,
.state_name = sctp_state_name,
.state_transition = sctp_state_transition,
.app_conn_bind = sctp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL,
};
| linux-master | net/netfilter/ipvs/ip_vs_proto_sctp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Weighted Least-Connection Scheduling module
*
* Authors: Wensong Zhang <[email protected]>
* Peter Kese <[email protected]>
*
* Changes:
* Wensong Zhang : changed the ip_vs_wlc_schedule to return dest
* Wensong Zhang : changed to use the inactconns in scheduling
* Wensong Zhang : changed some cosmetic things for debugging
* Wensong Zhang : changed for the d-linked destination list
* Wensong Zhang : added the ip_vs_wlc_update_svc
* Wensong Zhang : added any dest with weight=0 is quiesced
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
/*
* Weighted Least Connection scheduling
*/
static struct ip_vs_dest *
ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *least;
int loh, doh;
IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");
/*
* We calculate the load of each dest server as follows:
* (dest overhead) / dest->weight
*
* Remember -- no floats in kernel mode!!!
* The comparison of h1*w2 > h2*w1 is equivalent to that of
* h1/w1 > h2/w2
* if every weight is larger than zero.
*
* The server with weight=0 is quiesced and will not receive any
* new connections.
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
atomic_read(&dest->weight) > 0) {
least = dest;
loh = ip_vs_dest_conn_overhead(least);
goto nextstage;
}
}
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
/*
* Find the destination with the least load.
*/
nextstage:
list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if ((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight)) {
least = dest;
loh = doh;
}
}
IP_VS_DBG_BUF(6, "WLC: server %s:%u "
"activeconns %d refcnt %d weight %d overhead %d\n",
IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
refcount_read(&least->refcnt),
atomic_read(&least->weight), loh);
return least;
}
static struct ip_vs_scheduler ip_vs_wlc_scheduler =
{
.name = "wlc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
.schedule = ip_vs_wlc_schedule,
};
static int __init ip_vs_wlc_init(void)
{
return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
}
static void __exit ip_vs_wlc_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_wlc_scheduler);
synchronize_rcu();
}
module_init(ip_vs_wlc_init);
module_exit(ip_vs_wlc_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_wlc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_proto_tcp.c: TCP load balancing support for IPVS
*
* Authors: Wensong Zhang <[email protected]>
* Julian Anastasov <[email protected]>
*
* Changes: Hans Schillstrom <[email protected]>
*
* Network name space (netns) aware.
* Global data moved to netns, i.e. struct netns_ipvs
* tcp_timeouts table has copy per netns in a hash table per
* protocol ip_vs_proto_data and is handled by netns
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h> /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h> /* for csum_tcpudp_magic */
#include <net/ip6_checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ip_vs.h>
static int
tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
static int
tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
struct ip_vs_iphdr *iph)
{
struct ip_vs_service *svc;
struct tcphdr _tcph, *th;
__be16 _ports[2], *ports = NULL;
/* In the event of icmp, we're only guaranteed to have the first 8
* bytes of the transport header, so we only check the rest of the
* TCP packet for non-ICMP packets
*/
if (likely(!ip_vs_iph_icmp(iph))) {
th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
if (th) {
if (th->rst || !(sysctl_sloppy_tcp(ipvs) || th->syn))
return 1;
ports = &th->source;
}
} else {
ports = skb_header_pointer(
skb, iph->len, sizeof(_ports), &_ports);
}
if (!ports) {
*verdict = NF_DROP;
return 0;
}
/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
if (likely(!ip_vs_iph_inverse(iph)))
svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
&iph->daddr, ports[1]);
else
svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
&iph->saddr, ports[0]);
if (svc) {
int ignored;
if (ip_vs_todrop(ipvs)) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
*/
*verdict = NF_DROP;
return 0;
}
/*
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
if (!*cpp && ignored <= 0) {
if (!ignored)
*verdict = ip_vs_leave(svc, skb, pd, iph);
else
*verdict = NF_DROP;
return 0;
}
}
/* NF_ACCEPT */
return 1;
}
static inline void
tcp_fast_csum_update(int af, struct tcphdr *tcph,
const union nf_inet_addr *oldip,
const union nf_inet_addr *newip,
__be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
tcph->check =
csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
ip_vs_check_diff2(oldport, newport,
~csum_unfold(tcph->check))));
else
#endif
tcph->check =
csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
ip_vs_check_diff2(oldport, newport,
~csum_unfold(tcph->check))));
}
static inline void
tcp_partial_csum_update(int af, struct tcphdr *tcph,
const union nf_inet_addr *oldip,
const union nf_inet_addr *newip,
__be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
tcph->check =
~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
ip_vs_check_diff2(oldlen, newlen,
csum_unfold(tcph->check))));
else
#endif
tcph->check =
~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
ip_vs_check_diff2(oldlen, newlen,
csum_unfold(tcph->check))));
}
INDIRECT_CALLABLE_SCOPE int
tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
struct tcphdr *tcph;
unsigned int tcphoff = iph->len;
bool payload_csum = false;
int oldlen;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
return 1;
#endif
oldlen = skb->len - tcphoff;
/* csum_check requires unshared skb */
if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (!tcp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
if (!(ret = ip_vs_app_pkt_out(cp, skb, iph)))
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 1)
oldlen = skb->len - tcphoff;
else
payload_csum = true;
}
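/* Mangle the TCP header: the source port becomes the virtual
* service port; checksums are adjusted below.
*/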
tcph = (void *)skb_network_header(skb) + tcphoff;
tcph->source = cp->vport;
/* Adjust TCP checksums */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
htons(oldlen),
htons(skb->len - tcphoff));
} else if (!payload_csum) {
/* Only port and addr are changed, do fast csum update */
tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
cp->dport, cp->vport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
tcph->check = 0;
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
&cp->caddr.in6,
skb->len - tcphoff,
cp->protocol, skb->csum);
else
#endif
tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
cp->caddr.ip,
skb->len - tcphoff,
cp->protocol,
skb->csum);
skb->ip_summed = CHECKSUM_UNNECESSARY;
IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
pp->name, tcph->check,
(char*)&(tcph->check) - (char*)tcph);
}
return 1;
}
static int
tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
struct tcphdr *tcph;
unsigned int tcphoff = iph->len;
bool payload_csum = false;
int oldlen;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
return 1;
#endif
oldlen = skb->len - tcphoff;
/* csum_check requires unshared skb */
if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (!tcp_csum_check(cp->af, skb, pp))
return 0;
/*
* Attempt ip_vs_app call.
* It will fix up the ip_vs_conn and the iph ack_seq
*/
if (!(ret = ip_vs_app_pkt_in(cp, skb, iph)))
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 1)
oldlen = skb->len - tcphoff;
else
payload_csum = true;
}
tcph = (void *)skb_network_header(skb) + tcphoff;
tcph->dest = cp->dport;
/*
* Adjust TCP checksums
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
htons(oldlen),
htons(skb->len - tcphoff));
} else if (!payload_csum) {
/* Only port and addr are changed, do fast csum update */
tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
cp->vport, cp->dport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
tcph->check = 0;
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
tcph->check = csum_ipv6_magic(&cp->caddr.in6,
&cp->daddr.in6,
skb->len - tcphoff,
cp->protocol, skb->csum);
else
#endif
tcph->check = csum_tcpudp_magic(cp->caddr.ip,
cp->daddr.ip,
skb->len - tcphoff,
cp->protocol,
skb->csum);
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return 1;
}
static int
tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
unsigned int tcphoff;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
tcphoff = sizeof(struct ipv6hdr);
else
#endif
tcphoff = ip_hdrlen(skb);
switch (skb->ip_summed) {
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
fallthrough;
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - tcphoff,
ipv6_hdr(skb)->nexthdr,
skb->csum)) {
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
} else
#endif
if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - tcphoff,
ip_hdr(skb)->protocol,
skb->csum)) {
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
break;
default:
/* No need to checksum. */
break;
}
return 1;
}
#define TCP_DIR_INPUT 0
#define TCP_DIR_OUTPUT 4
#define TCP_DIR_INPUT_ONLY 8
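/* Row offsets into the tcp_states tables: each direction owns four
* consecutive rows (syn, fin, ack, rst).
*/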
static const int tcp_state_off[IP_VS_DIR_LAST] = {
[IP_VS_DIR_INPUT] = TCP_DIR_INPUT,
[IP_VS_DIR_OUTPUT] = TCP_DIR_OUTPUT,
[IP_VS_DIR_INPUT_ONLY] = TCP_DIR_INPUT_ONLY,
};
/*
* Timeout table[state]
*/
static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE] = 2*HZ,
[IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ,
[IP_VS_TCP_S_SYN_SENT] = 2*60*HZ,
[IP_VS_TCP_S_SYN_RECV] = 1*60*HZ,
[IP_VS_TCP_S_FIN_WAIT] = 2*60*HZ,
[IP_VS_TCP_S_TIME_WAIT] = 2*60*HZ,
[IP_VS_TCP_S_CLOSE] = 10*HZ,
[IP_VS_TCP_S_CLOSE_WAIT] = 60*HZ,
[IP_VS_TCP_S_LAST_ACK] = 30*HZ,
[IP_VS_TCP_S_LISTEN] = 2*60*HZ,
[IP_VS_TCP_S_SYNACK] = 120*HZ,
[IP_VS_TCP_S_LAST] = 2*HZ,
};
static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE] = "NONE",
[IP_VS_TCP_S_ESTABLISHED] = "ESTABLISHED",
[IP_VS_TCP_S_SYN_SENT] = "SYN_SENT",
[IP_VS_TCP_S_SYN_RECV] = "SYN_RECV",
[IP_VS_TCP_S_FIN_WAIT] = "FIN_WAIT",
[IP_VS_TCP_S_TIME_WAIT] = "TIME_WAIT",
[IP_VS_TCP_S_CLOSE] = "CLOSE",
[IP_VS_TCP_S_CLOSE_WAIT] = "CLOSE_WAIT",
[IP_VS_TCP_S_LAST_ACK] = "LAST_ACK",
[IP_VS_TCP_S_LISTEN] = "LISTEN",
[IP_VS_TCP_S_SYNACK] = "SYNACK",
[IP_VS_TCP_S_LAST] = "BUG!",
};
static const bool tcp_state_active_table[IP_VS_TCP_S_LAST] = {
[IP_VS_TCP_S_NONE] = false,
[IP_VS_TCP_S_ESTABLISHED] = true,
[IP_VS_TCP_S_SYN_SENT] = true,
[IP_VS_TCP_S_SYN_RECV] = true,
[IP_VS_TCP_S_FIN_WAIT] = false,
[IP_VS_TCP_S_TIME_WAIT] = false,
[IP_VS_TCP_S_CLOSE] = false,
[IP_VS_TCP_S_CLOSE_WAIT] = false,
[IP_VS_TCP_S_LAST_ACK] = false,
[IP_VS_TCP_S_LISTEN] = false,
[IP_VS_TCP_S_SYNACK] = true,
};
#define sNO IP_VS_TCP_S_NONE
#define sES IP_VS_TCP_S_ESTABLISHED
#define sSS IP_VS_TCP_S_SYN_SENT
#define sSR IP_VS_TCP_S_SYN_RECV
#define sFW IP_VS_TCP_S_FIN_WAIT
#define sTW IP_VS_TCP_S_TIME_WAIT
#define sCL IP_VS_TCP_S_CLOSE
#define sCW IP_VS_TCP_S_CLOSE_WAIT
#define sLA IP_VS_TCP_S_LAST_ACK
#define sLI IP_VS_TCP_S_LISTEN
#define sSA IP_VS_TCP_S_SYNACK
struct tcp_states_t {
int next_state[IP_VS_TCP_S_LAST];
};
static const char * tcp_state_name(int state)
{
if (state >= IP_VS_TCP_S_LAST)
return "ERR!";
return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
}
static bool tcp_state_active(int state)
{
if (state >= IP_VS_TCP_S_LAST)
return false;
return tcp_state_active_table[state];
}
static struct tcp_states_t tcp_states[] = {
/* INPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }},
/* OUTPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},
/* INPUT-ONLY */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
static struct tcp_states_t tcp_states_dos[] = {
/* INPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }},
/*ack*/ {{sES, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
/* OUTPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},
/* INPUT-ONLY */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
{
int on = (flags & 1); /* secure_tcp */
/*
** FIXME: change secure_tcp to independent sysctl var
** or make it per-service or per-app because it is valid
** for most if not for all of the applications. Something
** like "capabilities" (flags) for each object.
*/
pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
}
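/* Map the TCP flags to a row index within a direction block:
* 0 = SYN, 1 = FIN, 2 = ACK, 3 = RST; -1 if none of them is set.
*/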
static inline int tcp_state_idx(struct tcphdr *th)
{
if (th->rst)
return 3;
if (th->syn)
return 0;
if (th->fin)
return 1;
if (th->ack)
return 2;
return -1;
}
static inline void
set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, struct tcphdr *th)
{
int state_idx;
int new_state = IP_VS_TCP_S_CLOSE;
int state_off = tcp_state_off[direction];
/*
* Update state offset to INPUT_ONLY if necessary
* or delete NO_OUTPUT flag if output packet detected
*/
if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
if (state_off == TCP_DIR_OUTPUT)
cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
else
state_off = TCP_DIR_INPUT_ONLY;
}
if ((state_idx = tcp_state_idx(th)) < 0) {
IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
goto tcp_state_out;
}
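/* Row = direction block (state_off) + flag index, column = current state */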
new_state =
pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
tcp_state_out:
if (new_state != cp->state) {
struct ip_vs_dest *dest = cp->dest;
IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] c:%s:%d v:%s:%d "
"d:%s:%d state: %s->%s conn->refcnt:%d\n",
pd->pp->name,
((state_off == TCP_DIR_OUTPUT) ?
"output " : "input "),
th->syn ? 'S' : '.',
th->fin ? 'F' : '.',
th->ack ? 'A' : '.',
th->rst ? 'R' : '.',
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
ntohs(cp->dport),
tcp_state_name(cp->state),
tcp_state_name(new_state),
refcount_read(&cp->refcnt));
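/* Keep the destination's active/inactive connection counters in
* sync when the new state crosses the active/inactive boundary
* (see tcp_state_active_table).
*/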
if (dest) {
if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
!tcp_state_active(new_state)) {
atomic_dec(&dest->activeconns);
atomic_inc(&dest->inactconns);
cp->flags |= IP_VS_CONN_F_INACTIVE;
} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
tcp_state_active(new_state)) {
atomic_inc(&dest->activeconns);
atomic_dec(&dest->inactconns);
cp->flags &= ~IP_VS_CONN_F_INACTIVE;
}
}
if (new_state == IP_VS_TCP_S_ESTABLISHED)
ip_vs_control_assure_ct(cp);
}
if (likely(pd))
cp->timeout = pd->timeout_table[cp->state = new_state];
else /* What to do ? */
cp->timeout = tcp_timeouts[cp->state = new_state];
}
/*
* Handle state transitions
*/
static void
tcp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
struct ip_vs_proto_data *pd)
{
struct tcphdr _tcph, *th;
#ifdef CONFIG_IP_VS_IPV6
int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
#else
int ihl = ip_hdrlen(skb);
#endif
th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
if (th == NULL)
return;
spin_lock_bh(&cp->lock);
set_tcp_state(pd, cp, direction, th);
spin_unlock_bh(&cp->lock);
}
static inline __u16 tcp_app_hashkey(__be16 port)
{
return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
& TCP_APP_TAB_MASK;
}
static int tcp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
hash = tcp_app_hashkey(port);
list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
list_add_rcu(&inc->p_list, &ipvs->tcp_apps[hash]);
atomic_inc(&pd->appcnt);
out:
return ret;
}
static void
tcp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
atomic_dec(&pd->appcnt);
list_del_rcu(&inc->p_list);
}
static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
struct netns_ipvs *ipvs = cp->ipvs;
int hash;
struct ip_vs_app *inc;
int result = 0;
/* Default binding: bind app only for NAT */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
return 0;
/* Lookup application incarnations and bind the right one */
hash = tcp_app_hashkey(cp->vport);
list_for_each_entry_rcu(inc, &ipvs->tcp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
__func__,
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
ntohs(cp->vport),
inc->name, ntohs(inc->port));
cp->app = inc;
if (inc->init_conn)
result = inc->init_conn(inc, cp);
break;
}
}
return result;
}
/*
* Set LISTEN timeout. (ip_vs_conn_put will set up the timer)
*/
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
{
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(cp->ipvs, IPPROTO_TCP);
spin_lock_bh(&cp->lock);
cp->state = IP_VS_TCP_S_LISTEN;
cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
: tcp_timeouts[IP_VS_TCP_S_LISTEN]);
spin_unlock_bh(&cp->lock);
}
/* ---------------------------------------------
* timeouts are netns related now.
* ---------------------------------------------
*/
static int __ip_vs_tcp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
sizeof(tcp_timeouts));
if (!pd->timeout_table)
return -ENOMEM;
pd->tcp_state_table = tcp_states;
return 0;
}
static void __ip_vs_tcp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
kfree(pd->timeout_table);
}
struct ip_vs_protocol ip_vs_protocol_tcp = {
.name = "TCP",
.protocol = IPPROTO_TCP,
.num_states = IP_VS_TCP_S_LAST,
.dont_defrag = 0,
.init = NULL,
.exit = NULL,
.init_netns = __ip_vs_tcp_init,
.exit_netns = __ip_vs_tcp_exit,
.register_app = tcp_register_app,
.unregister_app = tcp_unregister_app,
.conn_schedule = tcp_conn_schedule,
.conn_in_get = ip_vs_conn_in_get_proto,
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = tcp_snat_handler,
.dnat_handler = tcp_dnat_handler,
.state_name = tcp_state_name,
.state_transition = tcp_state_transition,
.app_conn_bind = tcp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = tcp_timeout_change,
};
| linux-master | net/netfilter/ipvs/ip_vs_proto_tcp.c |
// SPDX-License-Identifier: GPL-2.0
/* IPVS: Maglev Hashing scheduling module
*
* Authors: Inju Song <[email protected]>
*
*/
/* The mh algorithm assigns to each destination a preference list over
* all lookup table positions and populates the table with each
* destination's most-preferred position. A destination is then selected
* by looking up the table with the hash key of the source IP address.
*
* The algorithm is detailed in:
* [3.4 Consistent Hashing]
* https://www.usenix.org/system/files/conference/nsdi16/nsdi16-paper-eisenbud.pdf
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/ip_vs.h>
#include <linux/siphash.h>
#include <linux/bitops.h>
#include <linux/gcd.h>
#define IP_VS_SVC_F_SCHED_MH_FALLBACK IP_VS_SVC_F_SCHED1 /* MH fallback */
#define IP_VS_SVC_F_SCHED_MH_PORT IP_VS_SVC_F_SCHED2 /* MH use port */
struct ip_vs_mh_lookup {
struct ip_vs_dest __rcu *dest; /* real server (cache) */
};
struct ip_vs_mh_dest_setup {
unsigned int offset; /* starting offset */
unsigned int skip; /* skip */
unsigned int perm; /* next_offset */
int turns; /* weight / gcd() and rshift */
};
/* Available prime numbers for MH table */
static int primes[] = {251, 509, 1021, 2039, 4093,
8191, 16381, 32749, 65521, 131071};
/* For IPVS MH entry hash table */
#ifndef CONFIG_IP_VS_MH_TAB_INDEX
#define CONFIG_IP_VS_MH_TAB_INDEX 12
#endif
#define IP_VS_MH_TAB_BITS (CONFIG_IP_VS_MH_TAB_INDEX / 2)
#define IP_VS_MH_TAB_INDEX (CONFIG_IP_VS_MH_TAB_INDEX - 8)
#define IP_VS_MH_TAB_SIZE primes[IP_VS_MH_TAB_INDEX]
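/* With the default CONFIG_IP_VS_MH_TAB_INDEX of 12 this selects
* primes[4], i.e. a lookup table of 4093 entries.
*/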
struct ip_vs_mh_state {
struct rcu_head rcu_head;
struct ip_vs_mh_lookup *lookup;
struct ip_vs_mh_dest_setup *dest_setup;
hsiphash_key_t hash1, hash2;
int gcd;
int rshift;
};
static inline void generate_hash_secret(hsiphash_key_t *hash1,
hsiphash_key_t *hash2)
{
hash1->key[0] = 2654435761UL;
hash1->key[1] = 2654435761UL;
hash2->key[0] = 2654446892UL;
hash2->key[1] = 2654446892UL;
}
/* Helper function to determine if server is unavailable */
static inline bool is_unavailable(struct ip_vs_dest *dest)
{
return atomic_read(&dest->weight) <= 0 ||
dest->flags & IP_VS_DEST_F_OVERLOAD;
}
/* Returns hash value for IPVS MH entry */
static inline unsigned int
ip_vs_mh_hashkey(int af, const union nf_inet_addr *addr,
__be16 port, hsiphash_key_t *key, unsigned int offset)
{
unsigned int v;
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0] ^ addr->ip6[1] ^
addr->ip6[2] ^ addr->ip6[3];
#endif
v = (offset + ntohs(port) + ntohl(addr_fold));
return hsiphash(&v, sizeof(v), key);
}
/* Reset all the hash buckets of the specified table. */
static void ip_vs_mh_reset(struct ip_vs_mh_state *s)
{
int i;
struct ip_vs_mh_lookup *l;
struct ip_vs_dest *dest;
l = &s->lookup[0];
for (i = 0; i < IP_VS_MH_TAB_SIZE; i++) {
dest = rcu_dereference_protected(l->dest, 1);
if (dest) {
ip_vs_dest_put(dest);
RCU_INIT_POINTER(l->dest, NULL);
}
l++;
}
}
static int ip_vs_mh_permutate(struct ip_vs_mh_state *s,
struct ip_vs_service *svc)
{
struct list_head *p;
struct ip_vs_mh_dest_setup *ds;
struct ip_vs_dest *dest;
int lw;
/* If gcd is smaller than 1, either there are no dests or all
* dests have a last_weight of zero, so skip the permutation
* for the dests.
*/
if (s->gcd < 1)
return 0;
/* Set dest_setup for the dests permutation */
p = &svc->destinations;
ds = &s->dest_setup[0];
while ((p = p->next) != &svc->destinations) {
dest = list_entry(p, struct ip_vs_dest, n_list);
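/* offset and skip are derived from two different hash keys.
* Because IP_VS_MH_TAB_SIZE is prime and skip is in
* [1, TAB_SIZE - 1], the sequence offset + i * skip (mod TAB_SIZE)
* visits every table slot exactly once.
*/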
ds->offset = ip_vs_mh_hashkey(svc->af, &dest->addr,
dest->port, &s->hash1, 0) %
IP_VS_MH_TAB_SIZE;
ds->skip = ip_vs_mh_hashkey(svc->af, &dest->addr,
dest->port, &s->hash2, 0) %
(IP_VS_MH_TAB_SIZE - 1) + 1;
ds->perm = ds->offset;
lw = atomic_read(&dest->last_weight);
ds->turns = ((lw / s->gcd) >> s->rshift) ? : (lw != 0);
ds++;
}
return 0;
}
static int ip_vs_mh_populate(struct ip_vs_mh_state *s,
struct ip_vs_service *svc)
{
int n, c, dt_count;
unsigned long *table;
struct list_head *p;
struct ip_vs_mh_dest_setup *ds;
struct ip_vs_dest *dest, *new_dest;
/* If gcd is smaller than 1, either there are no dests or all
* dests have a last_weight of zero, so skip the population
* for the dests and reset the lookup table.
*/
if (s->gcd < 1) {
ip_vs_mh_reset(s);
return 0;
}
table = bitmap_zalloc(IP_VS_MH_TAB_SIZE, GFP_KERNEL);
if (!table)
return -ENOMEM;
p = &svc->destinations;
n = 0;
dt_count = 0;
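/* Maglev population: walk the destinations round-robin and let
* each one claim its next preferred free slot; 'turns' makes
* higher-weight destinations claim more slots per pass.
*/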
while (n < IP_VS_MH_TAB_SIZE) {
if (p == &svc->destinations)
p = p->next;
ds = &s->dest_setup[0];
while (p != &svc->destinations) {
/* Ignore added server with zero weight */
if (ds->turns < 1) {
p = p->next;
ds++;
continue;
}
c = ds->perm;
while (test_bit(c, table)) {
/* Add skip, mod IP_VS_MH_TAB_SIZE */
ds->perm += ds->skip;
if (ds->perm >= IP_VS_MH_TAB_SIZE)
ds->perm -= IP_VS_MH_TAB_SIZE;
c = ds->perm;
}
__set_bit(c, table);
dest = rcu_dereference_protected(s->lookup[c].dest, 1);
new_dest = list_entry(p, struct ip_vs_dest, n_list);
if (dest != new_dest) {
if (dest)
ip_vs_dest_put(dest);
ip_vs_dest_hold(new_dest);
RCU_INIT_POINTER(s->lookup[c].dest, new_dest);
}
if (++n == IP_VS_MH_TAB_SIZE)
goto out;
if (++dt_count >= ds->turns) {
dt_count = 0;
p = p->next;
ds++;
}
}
}
out:
bitmap_free(table);
return 0;
}
/* Get ip_vs_dest associated with supplied parameters. */
static inline struct ip_vs_dest *
ip_vs_mh_get(struct ip_vs_service *svc, struct ip_vs_mh_state *s,
const union nf_inet_addr *addr, __be16 port)
{
unsigned int hash = ip_vs_mh_hashkey(svc->af, addr, port, &s->hash1, 0)
% IP_VS_MH_TAB_SIZE;
struct ip_vs_dest *dest = rcu_dereference(s->lookup[hash].dest);
return (!dest || is_unavailable(dest)) ? NULL : dest;
}
/* As ip_vs_mh_get, but with fallback if selected server is unavailable */
static inline struct ip_vs_dest *
ip_vs_mh_get_fallback(struct ip_vs_service *svc, struct ip_vs_mh_state *s,
const union nf_inet_addr *addr, __be16 port)
{
unsigned int offset, roffset;
unsigned int hash, ihash;
struct ip_vs_dest *dest;
/* First try the dest it's supposed to go to */
ihash = ip_vs_mh_hashkey(svc->af, addr, port,
&s->hash1, 0) % IP_VS_MH_TAB_SIZE;
dest = rcu_dereference(s->lookup[ihash].dest);
if (!dest)
return NULL;
if (!is_unavailable(dest))
return dest;
IP_VS_DBG_BUF(6, "MH: selected unavailable server %s:%u, reselecting",
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));
/* If the original dest is unavailable, loop around the table
* starting from ihash to find a new dest
*/
for (offset = 0; offset < IP_VS_MH_TAB_SIZE; offset++) {
roffset = (offset + ihash) % IP_VS_MH_TAB_SIZE;
hash = ip_vs_mh_hashkey(svc->af, addr, port, &s->hash1,
roffset) % IP_VS_MH_TAB_SIZE;
dest = rcu_dereference(s->lookup[hash].dest);
if (!dest)
break;
if (!is_unavailable(dest))
return dest;
IP_VS_DBG_BUF(6,
"MH: selected unavailable server %s:%u (offset %u), reselecting",
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port), roffset);
}
return NULL;
}
/* Assign all the hash buckets of the specified table with the service. */
static int ip_vs_mh_reassign(struct ip_vs_mh_state *s,
struct ip_vs_service *svc)
{
int ret;
if (svc->num_dests > IP_VS_MH_TAB_SIZE)
return -EINVAL;
if (svc->num_dests >= 1) {
s->dest_setup = kcalloc(svc->num_dests,
sizeof(struct ip_vs_mh_dest_setup),
GFP_KERNEL);
if (!s->dest_setup)
return -ENOMEM;
}
ip_vs_mh_permutate(s, svc);
ret = ip_vs_mh_populate(s, svc);
if (ret < 0)
goto out;
IP_VS_DBG_BUF(6, "MH: reassign lookup table of %s:%u\n",
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port));
out:
if (svc->num_dests >= 1) {
kfree(s->dest_setup);
s->dest_setup = NULL;
}
return ret;
}
static int ip_vs_mh_gcd_weight(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
int weight;
int g = 0;
list_for_each_entry(dest, &svc->destinations, n_list) {
weight = atomic_read(&dest->last_weight);
if (weight > 0) {
if (g > 0)
g = gcd(weight, g);
else
g = weight;
}
}
return g;
}
/* To avoid assigning huge weights to the MH table,
* calculate a shift value from the weights and their gcd.
*/
static int ip_vs_mh_shift_weight(struct ip_vs_service *svc, int gcd)
{
struct ip_vs_dest *dest;
int new_weight, weight = 0;
int mw, shift;
/* If gcd is smaller than 1, either there are no dests or all
* dests have a last_weight of zero, so return a shift value
* of zero.
*/
if (gcd < 1)
return 0;
list_for_each_entry(dest, &svc->destinations, n_list) {
new_weight = atomic_read(&dest->last_weight);
if (new_weight > weight)
weight = new_weight;
}
/* Because gcd is greater than zero,
* the maximum weight and gcd are always greater than zero
*/
mw = weight / gcd;
/* shift = occupied bits of weight/gcd - MH highest bits */
shift = fls(mw) - IP_VS_MH_TAB_BITS;
return (shift >= 0) ? shift : 0;
}
static void ip_vs_mh_state_free(struct rcu_head *head)
{
struct ip_vs_mh_state *s;
s = container_of(head, struct ip_vs_mh_state, rcu_head);
kfree(s->lookup);
kfree(s);
}
static int ip_vs_mh_init_svc(struct ip_vs_service *svc)
{
int ret;
struct ip_vs_mh_state *s;
/* Allocate the MH table for this service */
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
s->lookup = kcalloc(IP_VS_MH_TAB_SIZE, sizeof(struct ip_vs_mh_lookup),
GFP_KERNEL);
if (!s->lookup) {
kfree(s);
return -ENOMEM;
}
generate_hash_secret(&s->hash1, &s->hash2);
s->gcd = ip_vs_mh_gcd_weight(svc);
s->rshift = ip_vs_mh_shift_weight(svc, s->gcd);
IP_VS_DBG(6,
"MH lookup table (memory=%zdbytes) allocated for current service\n",
sizeof(struct ip_vs_mh_lookup) * IP_VS_MH_TAB_SIZE);
/* Assign the lookup table with current dests */
ret = ip_vs_mh_reassign(s, svc);
if (ret < 0) {
ip_vs_mh_reset(s);
ip_vs_mh_state_free(&s->rcu_head);
return ret;
}
/* No more failures, attach state */
svc->sched_data = s;
return 0;
}
static void ip_vs_mh_done_svc(struct ip_vs_service *svc)
{
struct ip_vs_mh_state *s = svc->sched_data;
/* Got to clean up lookup entry here */
ip_vs_mh_reset(s);
call_rcu(&s->rcu_head, ip_vs_mh_state_free);
IP_VS_DBG(6, "MH lookup table (memory=%zdbytes) released\n",
sizeof(struct ip_vs_mh_lookup) * IP_VS_MH_TAB_SIZE);
}
static int ip_vs_mh_dest_changed(struct ip_vs_service *svc,
struct ip_vs_dest *dest)
{
struct ip_vs_mh_state *s = svc->sched_data;
s->gcd = ip_vs_mh_gcd_weight(svc);
s->rshift = ip_vs_mh_shift_weight(svc, s->gcd);
/* Assign the lookup table with the updated service */
return ip_vs_mh_reassign(s, svc);
}
/* Helper function to get port number */
static inline __be16
ip_vs_mh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
{
__be16 _ports[2], *ports;
/* At this point we know that we have a valid packet of some kind.
* Because ICMP packets are only guaranteed to have the first 8
* bytes, let's just grab the ports. Fortunately they're in the
* same position for all three of the protocols we care about.
*/
switch (iph->protocol) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_SCTP:
ports = skb_header_pointer(skb, iph->len, sizeof(_ports),
&_ports);
if (unlikely(!ports))
return 0;
if (likely(!ip_vs_iph_inverse(iph)))
return ports[0];
else
return ports[1];
default:
return 0;
}
}
/* Maglev Hashing scheduling */
static struct ip_vs_dest *
ip_vs_mh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest;
struct ip_vs_mh_state *s;
__be16 port = 0;
const union nf_inet_addr *hash_addr;
hash_addr = ip_vs_iph_inverse(iph) ? &iph->daddr : &iph->saddr;
IP_VS_DBG(6, "%s : Scheduling...\n", __func__);
if (svc->flags & IP_VS_SVC_F_SCHED_MH_PORT)
port = ip_vs_mh_get_port(skb, iph);
s = (struct ip_vs_mh_state *)svc->sched_data;
if (svc->flags & IP_VS_SVC_F_SCHED_MH_FALLBACK)
dest = ip_vs_mh_get_fallback(svc, s, hash_addr, port);
else
dest = ip_vs_mh_get(svc, s, hash_addr, port);
if (!dest) {
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
IP_VS_DBG_BUF(6, "MH: source IP address %s:%u --> server %s:%u\n",
IP_VS_DBG_ADDR(svc->af, hash_addr),
ntohs(port),
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port));
return dest;
}
/* IPVS MH Scheduler structure */
static struct ip_vs_scheduler ip_vs_mh_scheduler = {
.name = "mh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_mh_scheduler.n_list),
.init_service = ip_vs_mh_init_svc,
.done_service = ip_vs_mh_done_svc,
.add_dest = ip_vs_mh_dest_changed,
.del_dest = ip_vs_mh_dest_changed,
.upd_dest = ip_vs_mh_dest_changed,
.schedule = ip_vs_mh_schedule,
};
static int __init ip_vs_mh_init(void)
{
return register_ip_vs_scheduler(&ip_vs_mh_scheduler);
}
static void __exit ip_vs_mh_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_mh_scheduler);
rcu_barrier();
}
module_init(ip_vs_mh_init);
module_exit(ip_vs_mh_cleanup);
MODULE_DESCRIPTION("Maglev hashing ipvs scheduler");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Inju Song <[email protected]>");
| linux-master | net/netfilter/ipvs/ip_vs_mh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Never Queue scheduling module
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes:
*/
/*
* The NQ algorithm adopts a two-speed model. When there is an idle server
* available, the job will be sent to the idle server, instead of waiting
* for a fast one. When there is no idle server available, the job will be
* sent to the server that minimizes its expected delay (the Shortest
* Expected Delay scheduling algorithm).
*
* See the following paper for more information:
* A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing
* in large heterogeneous systems. In Proceedings IEEE INFOCOM'88,
* pages 986-994, 1988.
*
* Thanks must go to Marko Buuri <[email protected]> for discussing NQ with me.
*
* The difference between NQ and SED is that NQ can improve overall
* system utilization.
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
static inline int
ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
{
/*
* We only use the active connection number in the cost
* calculation here.
*/
return atomic_read(&dest->activeconns) + 1;
}
/*
* Never Queue (NQ) scheduling
*/
static struct ip_vs_dest *
ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *least = NULL;
int loh = 0, doh;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/*
* We calculate the load of each dest server as follows:
* (server expected overhead) / dest->weight
*
* Remember -- no floats in kernel mode!!!
* The comparison of h1*w2 > h2*w1 is equivalent to that of
* h1/w1 > h2/w2
* if every weight is larger than zero.
*
* The server with weight=0 is quiesced and will not receive any
* new connections.
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD ||
!atomic_read(&dest->weight))
continue;
doh = ip_vs_nq_dest_overhead(dest);
/* return the server directly if it is idle */
if (atomic_read(&dest->activeconns) == 0) {
least = dest;
loh = doh;
goto out;
}
if (!least ||
((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight))) {
least = dest;
loh = doh;
}
}
if (!least) {
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
out:
IP_VS_DBG_BUF(6, "NQ: server %s:%u "
"activeconns %d refcnt %d weight %d overhead %d\n",
IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
refcount_read(&least->refcnt),
atomic_read(&least->weight), loh);
return least;
}
static struct ip_vs_scheduler ip_vs_nq_scheduler =
{
.name = "nq",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
.schedule = ip_vs_nq_schedule,
};
static int __init ip_vs_nq_init(void)
{
return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
}
static void __exit ip_vs_nq_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_nq_scheduler);
synchronize_rcu();
}
module_init(ip_vs_nq_init);
module_exit(ip_vs_nq_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_nq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the Netfilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <[email protected]>
* Peter Kese <[email protected]>
*
* Changes:
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/string.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <net/ip_vs.h>
EXPORT_SYMBOL(ip_vs_scheduler_err);
/*
* IPVS scheduler list
*/
static LIST_HEAD(ip_vs_schedulers);
/* semaphore for schedulers */
static DEFINE_MUTEX(ip_vs_sched_mutex);
/*
* Bind a service with a scheduler
*/
int ip_vs_bind_scheduler(struct ip_vs_service *svc,
struct ip_vs_scheduler *scheduler)
{
int ret;
if (scheduler->init_service) {
ret = scheduler->init_service(svc);
if (ret) {
pr_err("%s(): init error\n", __func__);
return ret;
}
}
rcu_assign_pointer(svc->scheduler, scheduler);
return 0;
}
/*
* Unbind a service with its scheduler
*/
void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
struct ip_vs_scheduler *sched)
{
struct ip_vs_scheduler *cur_sched;
cur_sched = rcu_dereference_protected(svc->scheduler, 1);
/* This check proves that old 'sched' was installed */
if (!cur_sched)
return;
if (sched->done_service)
sched->done_service(svc);
/* svc->scheduler can be set to NULL only by caller */
}
/*
* Get scheduler in the scheduler list by name
*/
static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
{
struct ip_vs_scheduler *sched;
IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);
mutex_lock(&ip_vs_sched_mutex);
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
/*
* Test and get the modules atomically
*/
if (sched->module && !try_module_get(sched->module)) {
/*
* This scheduler is just deleted
*/
continue;
}
if (strcmp(sched_name, sched->name)==0) {
/* HIT */
mutex_unlock(&ip_vs_sched_mutex);
return sched;
}
module_put(sched->module);
}
mutex_unlock(&ip_vs_sched_mutex);
return NULL;
}
/*
* Lookup scheduler and try to load it if it doesn't exist
*/
struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name)
{
struct ip_vs_scheduler *sched;
/*
* Search for the scheduler by sched_name
*/
sched = ip_vs_sched_getbyname(sched_name);
/*
* If scheduler not found, load the module and search again
*/
if (sched == NULL) {
request_module("ip_vs_%s", sched_name);
sched = ip_vs_sched_getbyname(sched_name);
}
return sched;
}
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
{
if (scheduler)
module_put(scheduler->module);
}
/*
* Common error output helper for schedulers
*/
void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
{
struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
char *sched_name = sched ? sched->name : "none";
if (svc->fwmark) {
IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
sched_name, svc->fwmark, svc->fwmark, msg);
#ifdef CONFIG_IP_VS_IPV6
} else if (svc->af == AF_INET6) {
IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
sched_name, ip_vs_proto_name(svc->protocol),
&svc->addr.in6, ntohs(svc->port), msg);
#endif
} else {
IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
sched_name, ip_vs_proto_name(svc->protocol),
&svc->addr.ip, ntohs(svc->port), msg);
}
}
/*
* Register a scheduler in the scheduler list
*/
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
struct ip_vs_scheduler *sched;
if (!scheduler) {
pr_err("%s(): NULL arg\n", __func__);
return -EINVAL;
}
if (!scheduler->name) {
pr_err("%s(): NULL scheduler_name\n", __func__);
return -EINVAL;
}
/* increase the module use count */
if (!ip_vs_use_count_inc())
return -ENOENT;
mutex_lock(&ip_vs_sched_mutex);
if (!list_empty(&scheduler->n_list)) {
mutex_unlock(&ip_vs_sched_mutex);
ip_vs_use_count_dec();
pr_err("%s(): [%s] scheduler already linked\n",
__func__, scheduler->name);
return -EINVAL;
}
/*
* Make sure that the scheduler with this name doesn't exist
* in the scheduler list.
*/
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
if (strcmp(scheduler->name, sched->name) == 0) {
mutex_unlock(&ip_vs_sched_mutex);
ip_vs_use_count_dec();
pr_err("%s(): [%s] scheduler already existed "
"in the system\n", __func__, scheduler->name);
return -EINVAL;
}
}
/*
* Add it into the d-linked scheduler list
*/
list_add(&scheduler->n_list, &ip_vs_schedulers);
mutex_unlock(&ip_vs_sched_mutex);
pr_info("[%s] scheduler registered.\n", scheduler->name);
return 0;
}
/*
* Unregister a scheduler from the scheduler list
*/
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
if (!scheduler) {
pr_err("%s(): NULL arg\n", __func__);
return -EINVAL;
}
mutex_lock(&ip_vs_sched_mutex);
if (list_empty(&scheduler->n_list)) {
mutex_unlock(&ip_vs_sched_mutex);
pr_err("%s(): [%s] scheduler is not in the list. failed\n",
__func__, scheduler->name);
return -EINVAL;
}
/*
* Remove it from the d-linked scheduler list
*/
list_del(&scheduler->n_list);
mutex_unlock(&ip_vs_sched_mutex);
/* decrease the module use count */
ip_vs_use_count_dec();
pr_info("[%s] scheduler unregistered.\n", scheduler->name);
return 0;
}
| linux-master | net/netfilter/ipvs/ip_vs_sched.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the Netfilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <[email protected]>
* Peter Kese <[email protected]>
* Julian Anastasov <[email protected]>
*
* The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
* with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
* and others. Much of the code here is taken from the IP MASQ code of kernel 2.2.
*
* Changes:
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/net.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h> /* for proc_net_* */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/net_namespace.h>
#include <net/ip_vs.h>
#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS 12
#endif
/*
* Connection hash size. Default is what was selected at compile time.
*/
static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
/* size and mask values */
int ip_vs_conn_tab_size __read_mostly;
static int ip_vs_conn_tab_mask __read_mostly;
/*
* Connection hash table: for input and output packets lookups of IPVS
*/
static struct hlist_head *ip_vs_conn_tab __read_mostly;
/* SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
/* counter for no client port connections */
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
/* random value for IPVS connection hash */
static unsigned int ip_vs_conn_rnd __read_mostly;
/*
* Fine locking granularity for big connection hash table
*/
#define CT_LOCKARRAY_BITS 5
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
/* We need an addrstrlen that works with or without v6 */
#ifdef CONFIG_IP_VS_IPV6
#define IP_VS_ADDRSTRLEN INET6_ADDRSTRLEN
#else
#define IP_VS_ADDRSTRLEN (8+1)
#endif
struct ip_vs_aligned_lock
{
spinlock_t l;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
/* lock array for conn table */
static struct ip_vs_aligned_lock
__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
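/* Writers serialize through this small array of spinlocks; the bucket
* hash is folded with CT_LOCKARRAY_MASK, so several hash buckets share
* one lock while readers rely on RCU.
*/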
static inline void ct_write_lock_bh(unsigned int key)
{
spin_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_unlock_bh(unsigned int key)
{
spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static void ip_vs_conn_expire(struct timer_list *t);
/*
* Returns hash value for IPVS connection entry
*/
static unsigned int ip_vs_conn_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
const union nf_inet_addr *addr,
__be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
(__force u32)port, proto, ip_vs_conn_rnd) ^
((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
#endif
return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
ip_vs_conn_rnd) ^
((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
}
static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
bool inverse)
{
const union nf_inet_addr *addr;
__be16 port;
if (p->pe_data && p->pe->hashkey_raw)
return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
ip_vs_conn_tab_mask;
if (likely(!inverse)) {
addr = p->caddr;
port = p->cport;
} else {
addr = p->vaddr;
port = p->vport;
}
return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);
}
static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(cp->ipvs, cp->af, cp->protocol,
&cp->caddr, cp->cport, NULL, 0, &p);
if (cp->pe) {
p.pe = cp->pe;
p.pe_data = cp->pe_data;
p.pe_data_len = cp->pe_data_len;
}
return ip_vs_conn_hashkey_param(&p, false);
}
/*
* Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
* returns bool success.
*/
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
{
unsigned int hash;
int ret;
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
return 0;
/* Hash by protocol, client address and port */
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock_bh(hash);
spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
cp->flags |= IP_VS_CONN_F_HASHED;
refcount_inc(&cp->refcnt);
hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
ret = 1;
} else {
pr_err("%s(): request for already hashed, called from %pS\n",
__func__, __builtin_return_address(0));
ret = 0;
}
spin_unlock(&cp->lock);
ct_write_unlock_bh(hash);
return ret;
}
/*
* UNhashes ip_vs_conn from ip_vs_conn_tab.
* returns bool success. Caller should hold conn reference.
*/
static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
{
unsigned int hash;
int ret;
/* unhash it and decrease its reference counter */
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock_bh(hash);
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
hlist_del_rcu(&cp->c_list);
cp->flags &= ~IP_VS_CONN_F_HASHED;
refcount_dec(&cp->refcnt);
ret = 1;
} else
ret = 0;
spin_unlock(&cp->lock);
ct_write_unlock_bh(hash);
return ret;
}
/* Try to unlink ip_vs_conn from ip_vs_conn_tab.
* returns bool success.
*/
static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
{
unsigned int hash;
bool ret = false;
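/* One-packet connections are never hashed, so dropping the last
* reference is enough to unlink them.
*/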
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
return refcount_dec_if_one(&cp->refcnt);
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock_bh(hash);
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
/* Decrease refcnt and unlink conn only if we are last user */
if (refcount_dec_if_one(&cp->refcnt)) {
hlist_del_rcu(&cp->c_list);
cp->flags &= ~IP_VS_CONN_F_HASHED;
ret = true;
}
}
spin_unlock(&cp->lock);
ct_write_unlock_bh(hash);
return ret;
}
/*
* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
* Called for pkts coming from OUTside-to-INside.
* p->caddr, p->cport: pkt source address (foreign host)
* p->vaddr, p->vport: pkt dest address (load balancer)
*/
static inline struct ip_vs_conn *
__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp;
hash = ip_vs_conn_hashkey_param(p, false);
rcu_read_lock();
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (p->cport == cp->cport && p->vport == cp->vport &&
cp->af == p->af &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
p->protocol == cp->protocol &&
cp->ipvs == p->ipvs) {
if (!__ip_vs_conn_get(cp))
continue;
/* HIT */
rcu_read_unlock();
return cp;
}
}
rcu_read_unlock();
return NULL;
}
struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
struct ip_vs_conn *cp;
cp = __ip_vs_conn_in_get(p);
if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
struct ip_vs_conn_param cport_zero_p = *p;
cport_zero_p.cport = 0;
cp = __ip_vs_conn_in_get(&cport_zero_p);
}
IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
cp ? "hit" : "not hit");
return cp;
}
static int
ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
struct ip_vs_conn_param *p)
{
__be16 _ports[2], *pptr;
pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
if (pptr == NULL)
return 1;
if (likely(!ip_vs_iph_inverse(iph)))
ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->saddr,
pptr[0], &iph->daddr, pptr[1], p);
else
ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->daddr,
pptr[1], &iph->saddr, pptr[0], p);
return 0;
}
struct ip_vs_conn *
ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
const struct sk_buff *skb,
const struct ip_vs_iphdr *iph)
{
struct ip_vs_conn_param p;
if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
return NULL;
return ip_vs_conn_in_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp;
hash = ip_vs_conn_hashkey_param(p, false);
rcu_read_lock();
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (unlikely(p->pe_data && p->pe->ct_match)) {
if (cp->ipvs != p->ipvs)
continue;
if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
if (__ip_vs_conn_get(cp))
goto out;
}
continue;
}
if (cp->af == p->af &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
/* protocol should only be IPPROTO_IP if
* p->vaddr is a fwmark */
ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
p->af, p->vaddr, &cp->vaddr) &&
p->vport == cp->vport && p->cport == cp->cport &&
cp->flags & IP_VS_CONN_F_TEMPLATE &&
p->protocol == cp->protocol &&
cp->ipvs == p->ipvs) {
if (__ip_vs_conn_get(cp))
goto out;
}
}
cp = NULL;
out:
rcu_read_unlock();
IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
cp ? "hit" : "not hit");
return cp;
}
/* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
* Called for pkts coming from inside-to-OUTside.
* p->caddr, p->cport: pkt source address (inside host)
* p->vaddr, p->vport: pkt dest address (foreign host) */
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp, *ret=NULL;
const union nf_inet_addr *saddr;
__be16 sport;
/*
* Check for "full" addressed entries
*/
hash = ip_vs_conn_hashkey_param(p, true);
rcu_read_lock();
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (p->vport != cp->cport)
continue;
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
sport = cp->vport;
saddr = &cp->vaddr;
} else {
sport = cp->dport;
saddr = &cp->daddr;
}
if (p->cport == sport && cp->af == p->af &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->caddr, saddr) &&
p->protocol == cp->protocol &&
cp->ipvs == p->ipvs) {
if (!__ip_vs_conn_get(cp))
continue;
/* HIT */
ret = cp;
break;
}
}
rcu_read_unlock();
IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
ret ? "hit" : "not hit");
return ret;
}
struct ip_vs_conn *
ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
const struct sk_buff *skb,
const struct ip_vs_iphdr *iph)
{
struct ip_vs_conn_param p;
if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
return NULL;
return ip_vs_conn_out_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
/*
* Put back the conn and restart its timer with its timeout
*/
static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
{
unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
0 : cp->timeout;
mod_timer(&cp->timer, jiffies+t);
__ip_vs_conn_put(cp);
}
void ip_vs_conn_put(struct ip_vs_conn *cp)
{
if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
(refcount_read(&cp->refcnt) == 1) &&
!timer_pending(&cp->timer))
/* expire connection immediately */
ip_vs_conn_expire(&cp->timer);
else
__ip_vs_conn_put_timer(cp);
}
/*
* Fill a no_client_port connection with a client port number
*/
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
{
if (ip_vs_conn_unhash(cp)) {
spin_lock_bh(&cp->lock);
if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
atomic_dec(&ip_vs_conn_no_cport_cnt);
cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
cp->cport = cport;
}
spin_unlock_bh(&cp->lock);
/* hash on new dport */
ip_vs_conn_hash(cp);
}
}
/*
* Bind a connection entry with the corresponding packet_xmit.
* Called by ip_vs_conn_new.
*/
static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
{
switch (IP_VS_FWD_METHOD(cp)) {
case IP_VS_CONN_F_MASQ:
cp->packet_xmit = ip_vs_nat_xmit;
break;
case IP_VS_CONN_F_TUNNEL:
#ifdef CONFIG_IP_VS_IPV6
if (cp->daf == AF_INET6)
cp->packet_xmit = ip_vs_tunnel_xmit_v6;
else
#endif
cp->packet_xmit = ip_vs_tunnel_xmit;
break;
case IP_VS_CONN_F_DROUTE:
cp->packet_xmit = ip_vs_dr_xmit;
break;
case IP_VS_CONN_F_LOCALNODE:
cp->packet_xmit = ip_vs_null_xmit;
break;
case IP_VS_CONN_F_BYPASS:
cp->packet_xmit = ip_vs_bypass_xmit;
break;
}
}
#ifdef CONFIG_IP_VS_IPV6
static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
{
switch (IP_VS_FWD_METHOD(cp)) {
case IP_VS_CONN_F_MASQ:
cp->packet_xmit = ip_vs_nat_xmit_v6;
break;
case IP_VS_CONN_F_TUNNEL:
if (cp->daf == AF_INET6)
cp->packet_xmit = ip_vs_tunnel_xmit_v6;
else
cp->packet_xmit = ip_vs_tunnel_xmit;
break;
case IP_VS_CONN_F_DROUTE:
cp->packet_xmit = ip_vs_dr_xmit_v6;
break;
case IP_VS_CONN_F_LOCALNODE:
cp->packet_xmit = ip_vs_null_xmit;
break;
case IP_VS_CONN_F_BYPASS:
cp->packet_xmit = ip_vs_bypass_xmit_v6;
break;
}
}
#endif
static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
{
return atomic_read(&dest->activeconns)
+ atomic_read(&dest->inactconns);
}
/*
* Bind a connection entry with a virtual service destination
* Called just after a new connection entry is created.
*/
static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
unsigned int conn_flags;
__u32 flags;
/* if dest is NULL, then return directly */
if (!dest)
return;
/* Increase the refcnt counter of the dest */
ip_vs_dest_hold(dest);
conn_flags = atomic_read(&dest->conn_flags);
if (cp->protocol != IPPROTO_UDP)
conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
flags = cp->flags;
/* Bind with the destination and its corresponding transmitter */
if (flags & IP_VS_CONN_F_SYNC) {
		/* if the connection is not a template and is created
* by sync, preserve the activity flag.
*/
if (!(flags & IP_VS_CONN_F_TEMPLATE))
conn_flags &= ~IP_VS_CONN_F_INACTIVE;
/* connections inherit forwarding method from dest */
flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
}
flags |= conn_flags;
cp->flags = flags;
cp->dest = dest;
IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
"d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
"dest->refcnt:%d\n",
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, refcount_read(&cp->refcnt),
refcount_read(&dest->refcnt));
/* Update the connection counters */
if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so modify the counters
* according to the flags, later the protocol can
* update them on state change
*/
if (!(flags & IP_VS_CONN_F_INACTIVE))
atomic_inc(&dest->activeconns);
else
atomic_inc(&dest->inactconns);
} else {
/* It is a persistent connection/template, so increase
the persistent connection counter */
atomic_inc(&dest->persistconns);
}
if (dest->u_threshold != 0 &&
ip_vs_dest_totalconns(dest) >= dest->u_threshold)
dest->flags |= IP_VS_DEST_F_OVERLOAD;
}
/*
* Check if there is a destination for the connection, if so
* bind the connection to the destination.
*/
void ip_vs_try_bind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest;
rcu_read_lock();
/* This function is only invoked by the synchronization code. We do
* not currently support heterogeneous pools with synchronization,
* so we can make the assumption that the svc_af is the same as the
* dest_af
*/
dest = ip_vs_find_dest(cp->ipvs, cp->af, cp->af, &cp->daddr,
cp->dport, &cp->vaddr, cp->vport,
cp->protocol, cp->fwmark, cp->flags);
if (dest) {
struct ip_vs_proto_data *pd;
spin_lock_bh(&cp->lock);
if (cp->dest) {
spin_unlock_bh(&cp->lock);
rcu_read_unlock();
return;
}
		/* Applications depend on the forwarding method, so it is
		 * better to always reassign them when binding the dest */
if (cp->app)
ip_vs_unbind_app(cp);
ip_vs_bind_dest(cp, dest);
spin_unlock_bh(&cp->lock);
/* Update its packet transmitter */
cp->packet_xmit = NULL;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
ip_vs_bind_xmit_v6(cp);
else
#endif
ip_vs_bind_xmit(cp);
pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
if (pd && atomic_read(&pd->appcnt))
ip_vs_bind_app(cp, pd->pp);
}
rcu_read_unlock();
}
/*
* Unbind a connection entry with its VS destination
* Called by the ip_vs_conn_expire function.
*/
static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest = cp->dest;
if (!dest)
return;
IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
"d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
"dest->refcnt:%d\n",
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, refcount_read(&cp->refcnt),
refcount_read(&dest->refcnt));
/* Update the connection counters */
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so decrease the inactconns
or activeconns counter */
if (cp->flags & IP_VS_CONN_F_INACTIVE) {
atomic_dec(&dest->inactconns);
} else {
atomic_dec(&dest->activeconns);
}
} else {
/* It is a persistent connection/template, so decrease
the persistent connection counter */
atomic_dec(&dest->persistconns);
}
if (dest->l_threshold != 0) {
if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
} else if (dest->u_threshold != 0) {
if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
} else {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
}
ip_vs_dest_put(dest);
}
static int expire_quiescent_template(struct netns_ipvs *ipvs,
struct ip_vs_dest *dest)
{
#ifdef CONFIG_SYSCTL
return ipvs->sysctl_expire_quiescent_template &&
(atomic_read(&dest->weight) == 0);
#else
return 0;
#endif
}
/*
* Checking if the destination of a connection template is available.
* If available, return 1, otherwise invalidate this connection
* template and return 0.
*/
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
{
struct ip_vs_dest *dest = ct->dest;
struct netns_ipvs *ipvs = ct->ipvs;
/*
* Checking the dest server status.
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
expire_quiescent_template(ipvs, dest) ||
(cdest && (dest != cdest))) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
"-> d:%s:%d\n",
ip_vs_proto_name(ct->protocol),
IP_VS_DBG_ADDR(ct->af, &ct->caddr),
ntohs(ct->cport),
IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
ntohs(ct->vport),
IP_VS_DBG_ADDR(ct->daf, &ct->daddr),
ntohs(ct->dport));
/*
* Invalidate the connection template
*/
if (ct->vport != htons(0xffff)) {
if (ip_vs_conn_unhash(ct)) {
ct->dport = htons(0xffff);
ct->vport = htons(0xffff);
ct->cport = 0;
ip_vs_conn_hash(ct);
}
}
/*
* Simply decrease the refcnt of the template,
* don't restart its timer.
*/
__ip_vs_conn_put(ct);
return 0;
}
return 1;
}
static void ip_vs_conn_rcu_free(struct rcu_head *head)
{
struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn,
rcu_head);
ip_vs_pe_put(cp->pe);
kfree(cp->pe_data);
kmem_cache_free(ip_vs_conn_cachep, cp);
}
/* Try to delete connection while not holding reference */
static void ip_vs_conn_del(struct ip_vs_conn *cp)
{
if (del_timer(&cp->timer)) {
/* Drop cp->control chain too */
if (cp->control)
cp->timeout = 0;
ip_vs_conn_expire(&cp->timer);
}
}
/* Try to delete connection while holding reference */
static void ip_vs_conn_del_put(struct ip_vs_conn *cp)
{
if (del_timer(&cp->timer)) {
/* Drop cp->control chain too */
if (cp->control)
cp->timeout = 0;
__ip_vs_conn_put(cp);
ip_vs_conn_expire(&cp->timer);
} else {
__ip_vs_conn_put(cp);
}
}
static void ip_vs_conn_expire(struct timer_list *t)
{
struct ip_vs_conn *cp = from_timer(cp, t, timer);
struct netns_ipvs *ipvs = cp->ipvs;
/*
* do I control anybody?
*/
if (atomic_read(&cp->n_control))
goto expire_later;
/* Unlink conn if not referenced anymore */
if (likely(ip_vs_conn_unlink(cp))) {
struct ip_vs_conn *ct = cp->control;
/* delete the timer if it is activated by other users */
del_timer(&cp->timer);
/* does anybody control me? */
if (ct) {
bool has_ref = !cp->timeout && __ip_vs_conn_get(ct);
ip_vs_control_del(cp);
/* Drop CTL or non-assured TPL if not used anymore */
if (has_ref && !atomic_read(&ct->n_control) &&
(!(ct->flags & IP_VS_CONN_F_TEMPLATE) ||
!(ct->state & IP_VS_CTPL_S_ASSURED))) {
IP_VS_DBG(4, "drop controlling connection\n");
ip_vs_conn_del_put(ct);
} else if (has_ref) {
__ip_vs_conn_put(ct);
}
}
if ((cp->flags & IP_VS_CONN_F_NFCT) &&
!(cp->flags & IP_VS_CONN_F_ONE_PACKET)) {
/* Do not access conntracks during subsys cleanup
* because nf_conntrack_find_get can not be used after
* conntrack cleanup for the net.
*/
smp_rmb();
if (ipvs->enable)
ip_vs_conn_drop_conntrack(cp);
}
if (unlikely(cp->app != NULL))
ip_vs_unbind_app(cp);
ip_vs_unbind_dest(cp);
if (cp->flags & IP_VS_CONN_F_NO_CPORT)
atomic_dec(&ip_vs_conn_no_cport_cnt);
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
ip_vs_conn_rcu_free(&cp->rcu_head);
else
call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
atomic_dec(&ipvs->conn_count);
return;
}
expire_later:
IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
refcount_read(&cp->refcnt),
atomic_read(&cp->n_control));
refcount_inc(&cp->refcnt);
cp->timeout = 60*HZ;
if (ipvs->sync_state & IP_VS_STATE_MASTER)
ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));
__ip_vs_conn_put_timer(cp);
}
/* Modify timer, so that it expires as soon as possible.
* Can be called without reference only if under RCU lock.
* We can have such chain of conns linked with ->control: DATA->CTL->TPL
* - DATA (eg. FTP) and TPL (persistence) can be present depending on setup
* - cp->timeout=0 indicates all conns from chain should be dropped but
* TPL is not dropped if in assured state
*/
void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
/* Using mod_timer_pending will ensure the timer is not
* modified after the final del_timer in ip_vs_conn_expire.
*/
if (timer_pending(&cp->timer) &&
time_after(cp->timer.expires, jiffies))
mod_timer_pending(&cp->timer, jiffies);
}
/*
* Create a new connection entry and hash it into the ip_vs_conn_tab
*/
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
struct ip_vs_dest *dest, __u32 fwmark)
{
struct ip_vs_conn *cp;
struct netns_ipvs *ipvs = p->ipvs;
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
p->protocol);
cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NULL;
}
INIT_HLIST_NODE(&cp->c_list);
timer_setup(&cp->timer, ip_vs_conn_expire, 0);
cp->ipvs = ipvs;
cp->af = p->af;
cp->daf = dest_af;
cp->protocol = p->protocol;
ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
cp->cport = p->cport;
/* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
&cp->vaddr, p->vaddr);
cp->vport = p->vport;
ip_vs_addr_set(cp->daf, &cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
cp->fwmark = fwmark;
if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
ip_vs_pe_get(p->pe);
cp->pe = p->pe;
cp->pe_data = p->pe_data;
cp->pe_data_len = p->pe_data_len;
} else {
cp->pe = NULL;
cp->pe_data = NULL;
cp->pe_data_len = 0;
}
spin_lock_init(&cp->lock);
/*
	 * Mark the entry as referenced by the current thread before hashing
	 * it into the table, so that another thread running
	 * ip_vs_random_dropentry cannot drop this entry.
*/
refcount_set(&cp->refcnt, 1);
cp->control = NULL;
atomic_set(&cp->n_control, 0);
atomic_set(&cp->in_pkts, 0);
cp->packet_xmit = NULL;
cp->app = NULL;
cp->app_data = NULL;
/* reset struct ip_vs_seq */
cp->in_seq.delta = 0;
cp->out_seq.delta = 0;
atomic_inc(&ipvs->conn_count);
if (flags & IP_VS_CONN_F_NO_CPORT)
atomic_inc(&ip_vs_conn_no_cport_cnt);
/* Bind the connection with a destination server */
cp->dest = NULL;
ip_vs_bind_dest(cp, dest);
/* Set its state and timeout */
cp->state = 0;
cp->old_state = 0;
cp->timeout = 3*HZ;
cp->sync_endtime = jiffies & ~3UL;
/* Bind its packet transmitter */
#ifdef CONFIG_IP_VS_IPV6
if (p->af == AF_INET6)
ip_vs_bind_xmit_v6(cp);
else
#endif
ip_vs_bind_xmit(cp);
if (unlikely(pd && atomic_read(&pd->appcnt)))
ip_vs_bind_app(cp, pd->pp);
/*
* Allow conntrack to be preserved. By default, conntrack
* is created and destroyed for every packet.
* Sometimes keeping conntrack can be useful for
* IP_VS_CONN_F_ONE_PACKET too.
*/
if (ip_vs_conntrack_enabled(ipvs))
cp->flags |= IP_VS_CONN_F_NFCT;
/* Hash it in the ip_vs_conn_tab finally */
ip_vs_conn_hash(cp);
return cp;
}
/*
* /proc/net/ip_vs_conn entries
*/
#ifdef CONFIG_PROC_FS
struct ip_vs_iter_state {
struct seq_net_private p;
struct hlist_head *l;
};
static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
{
int idx;
struct ip_vs_conn *cp;
struct ip_vs_iter_state *iter = seq->private;
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
/* __ip_vs_conn_get() is not needed by
* ip_vs_conn_seq_show and ip_vs_conn_sync_seq_show
*/
if (pos-- == 0) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
}
cond_resched_rcu();
}
return NULL;
}
static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct ip_vs_iter_state *iter = seq->private;
iter->l = NULL;
rcu_read_lock();
return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_conn *cp = v;
struct ip_vs_iter_state *iter = seq->private;
struct hlist_node *e;
struct hlist_head *l = iter->l;
int idx;
++*pos;
if (v == SEQ_START_TOKEN)
return ip_vs_conn_array(seq, 0);
/* more on same hash chain? */
e = rcu_dereference(hlist_next_rcu(&cp->c_list));
if (e)
return hlist_entry(e, struct ip_vs_conn, c_list);
idx = l - ip_vs_conn_tab;
while (++idx < ip_vs_conn_tab_size) {
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
cond_resched_rcu();
}
iter->l = NULL;
return NULL;
}
static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n");
else {
const struct ip_vs_conn *cp = v;
struct net *net = seq_file_net(seq);
char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
size_t len = 0;
char dbuf[IP_VS_ADDRSTRLEN];
if (!net_eq(cp->ipvs->net, net))
return 0;
if (cp->pe_data) {
pe_data[0] = ' ';
len = strlen(cp->pe->name);
memcpy(pe_data + 1, cp->pe->name, len);
pe_data[len + 1] = ' ';
len += 2;
len += cp->pe->show_pe_data(cp, pe_data + len);
}
pe_data[len] = '\0';
#ifdef CONFIG_IP_VS_IPV6
if (cp->daf == AF_INET6)
snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
else
#endif
snprintf(dbuf, sizeof(dbuf), "%08X",
ntohl(cp->daddr.ip));
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
"%s %04X %-11s %7u%s\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
dbuf, ntohs(cp->dport),
ip_vs_state_name(cp),
jiffies_delta_to_msecs(cp->timer.expires -
jiffies) / 1000,
pe_data);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X"
" %s %04X %-11s %7u%s\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
dbuf, ntohs(cp->dport),
ip_vs_state_name(cp),
jiffies_delta_to_msecs(cp->timer.expires -
jiffies) / 1000,
pe_data);
}
return 0;
}
static const struct seq_operations ip_vs_conn_seq_ops = {
.start = ip_vs_conn_seq_start,
.next = ip_vs_conn_seq_next,
.stop = ip_vs_conn_seq_stop,
.show = ip_vs_conn_seq_show,
};
static const char *ip_vs_origin_name(unsigned int flags)
{
if (flags & IP_VS_CONN_F_SYNC)
return "SYNC";
else
return "LOCAL";
}
static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
{
char dbuf[IP_VS_ADDRSTRLEN];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n");
else {
const struct ip_vs_conn *cp = v;
struct net *net = seq_file_net(seq);
if (!net_eq(cp->ipvs->net, net))
return 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->daf == AF_INET6)
snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
else
#endif
snprintf(dbuf, sizeof(dbuf), "%08X",
ntohl(cp->daddr.ip));
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
"%s %04X %-11s %-6s %7u\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
dbuf, ntohs(cp->dport),
ip_vs_state_name(cp),
ip_vs_origin_name(cp->flags),
jiffies_delta_to_msecs(cp->timer.expires -
jiffies) / 1000);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X "
"%s %04X %-11s %-6s %7u\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
dbuf, ntohs(cp->dport),
ip_vs_state_name(cp),
ip_vs_origin_name(cp->flags),
jiffies_delta_to_msecs(cp->timer.expires -
jiffies) / 1000);
}
return 0;
}
static const struct seq_operations ip_vs_conn_sync_seq_ops = {
.start = ip_vs_conn_seq_start,
.next = ip_vs_conn_seq_next,
.stop = ip_vs_conn_seq_stop,
.show = ip_vs_conn_sync_seq_show,
};
#endif
/* Randomly drop connection entries before running out of memory
* Can be used for DATA and CTL conns. For TPL conns there are exceptions:
* - traffic for services in OPS mode increases ct->in_pkts, so it is supported
* - traffic for services not in OPS mode does not increase ct->in_pkts in
* all cases, so it is not supported
*/
static inline int todrop_entry(struct ip_vs_conn *cp)
{
/*
* The drop rate array needs tuning for real environments.
* Called from timer bh only => no locking
*/
static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
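	/* With the rates above, a candidate conn that has seen i packets
	 * (0 <= i <= 8) is dropped once per todrop_rate[i] visits: i == 0 is
	 * never dropped, i == 1 is dropped on every visit and i == 8 roughly
	 * once in eight visits.
	 */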
static signed char todrop_counter[9] = {0};
int i;
/* if the conn entry hasn't lasted for 60 seconds, don't drop it.
	   This will leave enough time for a normal connection to get
	   through. */
if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
return 0;
/* Don't drop the entry if its number of incoming packets is not
located in [0, 8] */
i = atomic_read(&cp->in_pkts);
if (i > 8 || i < 0) return 0;
if (!todrop_rate[i]) return 0;
if (--todrop_counter[i] > 0) return 0;
todrop_counter[i] = todrop_rate[i];
return 1;
}
static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp)
{
struct ip_vs_service *svc;
if (!cp->dest)
return false;
svc = rcu_dereference(cp->dest->svc);
return svc && (svc->flags & IP_VS_SVC_F_ONEPACKET);
}
/* Called from keventd and must protect itself from softirqs */
void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
{
int idx;
struct ip_vs_conn *cp;
rcu_read_lock();
/*
* Randomly scan 1/32 of the whole table every second
*/
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
unsigned int hash = get_random_u32() & ip_vs_conn_tab_mask;
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->ipvs != ipvs)
continue;
if (atomic_read(&cp->n_control))
continue;
if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
/* connection template of OPS */
if (ip_vs_conn_ops_mode(cp))
goto try_drop;
if (!(cp->state & IP_VS_CTPL_S_ASSURED))
goto drop;
continue;
}
if (cp->protocol == IPPROTO_TCP) {
switch(cp->state) {
case IP_VS_TCP_S_SYN_RECV:
case IP_VS_TCP_S_SYNACK:
break;
case IP_VS_TCP_S_ESTABLISHED:
if (todrop_entry(cp))
break;
continue;
default:
continue;
}
} else if (cp->protocol == IPPROTO_SCTP) {
switch (cp->state) {
case IP_VS_SCTP_S_INIT1:
case IP_VS_SCTP_S_INIT:
break;
case IP_VS_SCTP_S_ESTABLISHED:
if (todrop_entry(cp))
break;
continue;
default:
continue;
}
} else {
try_drop:
if (!todrop_entry(cp))
continue;
}
drop:
IP_VS_DBG(4, "drop connection\n");
ip_vs_conn_del(cp);
}
cond_resched_rcu();
}
rcu_read_unlock();
}
/*
* Flush all the connection entries in the ip_vs_conn_tab
*/
static void ip_vs_conn_flush(struct netns_ipvs *ipvs)
{
int idx;
struct ip_vs_conn *cp, *cp_c;
flush_again:
rcu_read_lock();
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
if (cp->ipvs != ipvs)
continue;
if (atomic_read(&cp->n_control))
continue;
cp_c = cp->control;
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_del(cp);
if (cp_c && !atomic_read(&cp_c->n_control)) {
IP_VS_DBG(4, "del controlling connection\n");
ip_vs_conn_del(cp_c);
}
}
cond_resched_rcu();
}
rcu_read_unlock();
	/* the counter may not be zero, because some conn entries may still be
	   run by slow timer handlers or be unhashed but still referenced */
if (atomic_read(&ipvs->conn_count) != 0) {
schedule();
goto flush_again;
}
}
#ifdef CONFIG_SYSCTL
void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs)
{
int idx;
struct ip_vs_conn *cp, *cp_c;
struct ip_vs_dest *dest;
rcu_read_lock();
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
if (cp->ipvs != ipvs)
continue;
dest = cp->dest;
if (!dest || (dest->flags & IP_VS_DEST_F_AVAILABLE))
continue;
if (atomic_read(&cp->n_control))
continue;
cp_c = cp->control;
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_del(cp);
if (cp_c && !atomic_read(&cp_c->n_control)) {
IP_VS_DBG(4, "del controlling connection\n");
ip_vs_conn_del(cp_c);
}
}
cond_resched_rcu();
/* netns clean up started, abort delayed work */
if (!ipvs->enable)
break;
}
rcu_read_unlock();
}
#endif
/*
* per netns init and exit
*/
int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
{
atomic_set(&ipvs->conn_count, 0);
#ifdef CONFIG_PROC_FS
if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
&ip_vs_conn_seq_ops,
sizeof(struct ip_vs_iter_state)))
goto err_conn;
if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
&ip_vs_conn_sync_seq_ops,
sizeof(struct ip_vs_iter_state)))
goto err_conn_sync;
#endif
return 0;
#ifdef CONFIG_PROC_FS
err_conn_sync:
remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
err_conn:
return -ENOMEM;
#endif
}
void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
{
/* flush all the connection entries first */
ip_vs_conn_flush(ipvs);
#ifdef CONFIG_PROC_FS
remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
#endif
}
int __init ip_vs_conn_init(void)
{
size_t tab_array_size;
int max_avail;
#if BITS_PER_LONG > 32
int max = 27;
#else
int max = 20;
#endif
int min = 8;
int idx;
max_avail = order_base_2(totalram_pages()) + PAGE_SHIFT;
max_avail -= 2; /* ~4 in hash row */
max_avail -= 1; /* IPVS up to 1/2 of mem */
max_avail -= order_base_2(sizeof(struct ip_vs_conn));
max = clamp(max, min, max_avail);
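	/* For example, on a 64-bit box with 4 GB of RAM and 4 KB pages,
	 * totalram_pages() is about 2^20, so max_avail starts at 20 + 12 and
	 * then loses 2 + 1 + order_base_2(sizeof(struct ip_vs_conn)) bits;
	 * assuming the conn size rounds up to 512 bytes (order 9) this gives
	 * max_avail = 20, i.e. at most 2^20 hash buckets.
	 */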
ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max);
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
/*
* Allocate the connection hash table and initialize its list heads
*/
tab_array_size = array_size(ip_vs_conn_tab_size,
sizeof(*ip_vs_conn_tab));
ip_vs_conn_tab = kvmalloc_array(ip_vs_conn_tab_size,
sizeof(*ip_vs_conn_tab), GFP_KERNEL);
if (!ip_vs_conn_tab)
return -ENOMEM;
/* Allocate ip_vs_conn slab cache */
ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
sizeof(struct ip_vs_conn), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ip_vs_conn_cachep) {
kvfree(ip_vs_conn_tab);
return -ENOMEM;
}
pr_info("Connection hash table configured (size=%d, memory=%zdKbytes)\n",
ip_vs_conn_tab_size, tab_array_size / 1024);
IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
sizeof(struct ip_vs_conn));
for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
spin_lock_init(&__ip_vs_conntbl_lock_array[idx].l);
}
/* calculate the random value for connection hash */
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
return 0;
}
void ip_vs_conn_cleanup(void)
{
/* Wait all ip_vs_conn_rcu_free() callbacks to complete */
rcu_barrier();
/* Release the empty cache */
kmem_cache_destroy(ip_vs_conn_cachep);
kvfree(ip_vs_conn_tab);
}
| linux-master | net/netfilter/ipvs/ip_vs_conn.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Weighted Fail Over module
*
* Authors: Kenny Mathis <[email protected]>
*
* Changes:
* Kenny Mathis : added initial functionality based on weight
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
/* Weighted Fail Over Module */
static struct ip_vs_dest *
ip_vs_fo_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *hweight = NULL;
int hw = 0; /* Track highest weight */
IP_VS_DBG(6, "ip_vs_fo_schedule(): Scheduling...\n");
/* Basic failover functionality
* Find virtual server with highest weight and send it traffic
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
atomic_read(&dest->weight) > hw) {
hweight = dest;
hw = atomic_read(&dest->weight);
}
}
if (hweight) {
IP_VS_DBG_BUF(6, "FO: server %s:%u activeconns %d weight %d\n",
IP_VS_DBG_ADDR(hweight->af, &hweight->addr),
ntohs(hweight->port),
atomic_read(&hweight->activeconns),
atomic_read(&hweight->weight));
return hweight;
}
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
static struct ip_vs_scheduler ip_vs_fo_scheduler = {
.name = "fo",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_fo_scheduler.n_list),
.schedule = ip_vs_fo_schedule,
};
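/* Example usage (illustrative addresses): the "fo" scheduler is selected
 * with ipvsadm, e.g.
 *
 *	ipvsadm -A -t 10.0.0.1:80 -s fo
 *	ipvsadm -a -t 10.0.0.1:80 -r 192.168.0.10:80 -m -w 200
 *	ipvsadm -a -t 10.0.0.1:80 -r 192.168.0.11:80 -m -w 100
 *
 * All traffic is sent to the highest-weight real server that is not
 * overloaded (here 192.168.0.10); the lower-weight server is used only
 * if that one is removed, quiesced or flagged as overloaded.
 */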
static int __init ip_vs_fo_init(void)
{
return register_ip_vs_scheduler(&ip_vs_fo_scheduler);
}
static void __exit ip_vs_fo_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_fo_scheduler);
synchronize_rcu();
}
module_init(ip_vs_fo_init);
module_exit(ip_vs_fo_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_fo.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* IPVS: Power of Twos Choice Scheduling module
*
* Authors: Darby Payne <[email protected]>
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <net/ip_vs.h>
/* Power of Twos Choice scheduling, algorithm originally described by
* Michael Mitzenmacher.
*
 * Randomly picks two destinations and selects the one with the
 * fewest connections
*
* The algorithm calculates a few variables
* - total_weight = sum of all weights
* - rweight1 = random number between [0,total_weight]
* - rweight2 = random number between [0,total_weight]
*
* For each destination
* decrement rweight1 and rweight2 by the destination weight
* pick choice1 when rweight1 is <= 0
* pick choice2 when rweight2 is <= 0
*
 * Return choice2 if choice2 has fewer connections than choice1,
 * normalized by weight
*
* References
* ----------
*
* [Mitzenmacher 2016]
* The Power of Two Random Choices: A Survey of Techniques and Results
 * Michael Mitzenmacher, Andrea W. Richa and Ramesh Sitaraman
* http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/twosurvey.pdf
*
*/
static struct ip_vs_dest *ip_vs_twos_schedule(struct ip_vs_service *svc,
const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *choice1 = NULL, *choice2 = NULL;
int rweight1, rweight2, weight1 = -1, weight2 = -1, overhead1 = 0;
int overhead2, total_weight = 0, weight;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/* Generate a random weight between [0,sum of all weights) */
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if (!(dest->flags & IP_VS_DEST_F_OVERLOAD)) {
weight = atomic_read(&dest->weight);
if (weight > 0) {
total_weight += weight;
choice1 = dest;
}
}
}
if (!choice1) {
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
/* Add 1 to total_weight so that the random weights are inclusive
* from 0 to total_weight
*/
total_weight += 1;
rweight1 = get_random_u32_below(total_weight);
rweight2 = get_random_u32_below(total_weight);
/* Pick two weighted servers */
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
weight = atomic_read(&dest->weight);
if (weight <= 0)
continue;
rweight1 -= weight;
rweight2 -= weight;
if (rweight1 <= 0 && weight1 == -1) {
choice1 = dest;
weight1 = weight;
overhead1 = ip_vs_dest_conn_overhead(dest);
}
if (rweight2 <= 0 && weight2 == -1) {
choice2 = dest;
weight2 = weight;
overhead2 = ip_vs_dest_conn_overhead(dest);
}
if (weight1 != -1 && weight2 != -1)
goto nextstage;
}
nextstage:
if (choice2 && (weight2 * overhead1) > (weight1 * overhead2))
choice1 = choice2;
IP_VS_DBG_BUF(6, "twos: server %s:%u conns %d refcnt %d weight %d\n",
IP_VS_DBG_ADDR(choice1->af, &choice1->addr),
ntohs(choice1->port), atomic_read(&choice1->activeconns),
refcount_read(&choice1->refcnt),
atomic_read(&choice1->weight));
return choice1;
}
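/* Minimal sketch (illustrative only, not used by this module) of the
 * selection step described in the comment above, operating on plain
 * arrays. The two random weights are passed in so no RNG is needed here,
 * and the overload-flag handling of the real scheduler is omitted. All
 * names below are made up for the example.
 */
static inline int ip_vs_twos_sketch_pick(const int *weight,
					 const int *overhead, int n,
					 int rweight1, int rweight2)
{
	int c1 = -1, c2 = -1, i;

	for (i = 0; i < n; i++) {
		if (weight[i] <= 0)
			continue;
		/* walk the weighted range; a choice is made where the
		 * corresponding random weight first drops to zero or below
		 */
		rweight1 -= weight[i];
		rweight2 -= weight[i];
		if (rweight1 <= 0 && c1 < 0)
			c1 = i;
		if (rweight2 <= 0 && c2 < 0)
			c2 = i;
		if (c1 >= 0 && c2 >= 0)
			break;
	}
	if (c1 < 0)
		return -1;	/* no destination with a positive weight */
	/* prefer the second choice when it has fewer connections,
	 * normalized by weight
	 */
	if (c2 >= 0 && weight[c2] * overhead[c1] > weight[c1] * overhead[c2])
		return c2;
	return c1;
}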
static struct ip_vs_scheduler ip_vs_twos_scheduler = {
.name = "twos",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_twos_scheduler.n_list),
.schedule = ip_vs_twos_schedule,
};
static int __init ip_vs_twos_init(void)
{
return register_ip_vs_scheduler(&ip_vs_twos_scheduler);
}
static void __exit ip_vs_twos_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_twos_scheduler);
synchronize_rcu();
}
module_init(ip_vs_twos_init);
module_exit(ip_vs_twos_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_twos.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_nfct.c: Netfilter connection tracking support for IPVS
*
* Portions Copyright (C) 2001-2002
* Antefacto Ltd, 181 Parnell St, Dublin 1, Ireland.
*
* Portions Copyright (C) 2003-2010
* Julian Anastasov
*
* Authors:
* Ben North <[email protected]>
* Julian Anastasov <[email protected]> Reorganize and sync with latest kernels
* Hannes Eder <[email protected]> Extend NFCT support for FTP, ipvs match
*
* Current status:
*
* - provide conntrack confirmation for new and related connections, by
* this way we can see their proper conntrack state in all hooks
* - support for all forwarding methods, not only NAT
* - FTP support (NAT), ability to support other NAT apps with expectations
* - to correctly create expectations for related NAT connections the proper
* NF conntrack support must be already installed, eg. ip_vs_ftp requires
* nf_conntrack_ftp ... iptables_nat for the same ports (but no iptables
* NAT rules are needed)
* - alter reply for NAT when forwarding packet in original direction:
* conntrack from client in NEW or RELATED (Passive FTP DATA) state or
* when RELATED conntrack is created from real server (Active FTP DATA)
* - if iptables_nat is not loaded the Passive FTP will not work (the
* PASV response can not be NAT-ed) but Active FTP should work
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip_vs.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define FMT_TUPLE "%s:%u->%s:%u/%u"
#define ARG_TUPLE(T) IP_VS_DBG_ADDR((T)->src.l3num, &(T)->src.u3), \
ntohs((T)->src.u.all), \
IP_VS_DBG_ADDR((T)->src.l3num, &(T)->dst.u3), \
ntohs((T)->dst.u.all), \
(T)->dst.protonum
#define FMT_CONN "%s:%u->%s:%u->%s:%u/%u:%u"
#define ARG_CONN(C) IP_VS_DBG_ADDR((C)->af, &((C)->caddr)), \
ntohs((C)->cport), \
IP_VS_DBG_ADDR((C)->af, &((C)->vaddr)), \
ntohs((C)->vport), \
IP_VS_DBG_ADDR((C)->daf, &((C)->daddr)), \
ntohs((C)->dport), \
(C)->protocol, (C)->state
void
ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct nf_conntrack_tuple new_tuple;
if (ct == NULL || nf_ct_is_confirmed(ct) ||
nf_ct_is_dying(ct))
return;
/* Never alter conntrack for non-NAT conns */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
return;
/* Never alter conntrack for OPS conns (no reply is expected) */
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
return;
/* Alter reply only in original direction */
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return;
/* Applications may adjust TCP seqs */
if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
!nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
return;
/*
* The connection is not yet in the hashtable, so we update it.
* CIP->VIP will remain the same, so leave the tuple in
* IP_CT_DIR_ORIGINAL untouched. When the reply comes back from the
* real-server we will see RIP->DIP.
*/
new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
/*
* This will also take care of UDP and other protocols.
*/
if (outin) {
new_tuple.src.u3 = cp->daddr;
if (new_tuple.dst.protonum != IPPROTO_ICMP &&
new_tuple.dst.protonum != IPPROTO_ICMPV6)
new_tuple.src.u.tcp.port = cp->dport;
} else {
new_tuple.dst.u3 = cp->vaddr;
if (new_tuple.dst.protonum != IPPROTO_ICMP &&
new_tuple.dst.protonum != IPPROTO_ICMPV6)
new_tuple.dst.u.tcp.port = cp->vport;
}
IP_VS_DBG_BUF(7, "%s: Updating conntrack ct=%p, status=0x%lX, "
"ctinfo=%d, old reply=" FMT_TUPLE "\n",
__func__, ct, ct->status, ctinfo,
ARG_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple));
IP_VS_DBG_BUF(7, "%s: Updating conntrack ct=%p, status=0x%lX, "
"ctinfo=%d, new reply=" FMT_TUPLE "\n",
__func__, ct, ct->status, ctinfo,
ARG_TUPLE(&new_tuple));
nf_conntrack_alter_reply(ct, &new_tuple);
IP_VS_DBG_BUF(7, "%s: Updated conntrack ct=%p for cp=" FMT_CONN "\n",
__func__, ct, ARG_CONN(cp));
}
int ip_vs_confirm_conntrack(struct sk_buff *skb)
{
return nf_conntrack_confirm(skb);
}
/*
* Called from init_conntrack() as expectfn handler.
*/
static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
{
struct nf_conntrack_tuple *orig, new_reply;
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
struct net *net = nf_ct_net(ct);
/*
* We assume that no NF locks are held before this callback.
* ip_vs_conn_out_get and ip_vs_conn_in_get should match their
* expectations even if they use wildcard values, now we provide the
* actual values from the newly created original conntrack direction.
* The conntrack is confirmed when packet reaches IPVS hooks.
*/
/* RS->CLIENT */
orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
ip_vs_conn_fill_param(net_ipvs(net), exp->tuple.src.l3num, orig->dst.protonum,
&orig->src.u3, orig->src.u.tcp.port,
&orig->dst.u3, orig->dst.u.tcp.port, &p);
cp = ip_vs_conn_out_get(&p);
if (cp) {
/* Change reply CLIENT->RS to CLIENT->VS */
IP_VS_DBG_BUF(7, "%s: for ct=%p, status=0x%lX found inout cp="
FMT_CONN "\n",
__func__, ct, ct->status, ARG_CONN(cp));
new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
IP_VS_DBG_BUF(7, "%s: ct=%p before alter: reply tuple="
FMT_TUPLE "\n",
__func__, ct, ARG_TUPLE(&new_reply));
new_reply.dst.u3 = cp->vaddr;
new_reply.dst.u.tcp.port = cp->vport;
goto alter;
}
/* CLIENT->VS */
cp = ip_vs_conn_in_get(&p);
if (cp) {
/* Change reply VS->CLIENT to RS->CLIENT */
IP_VS_DBG_BUF(7, "%s: for ct=%p, status=0x%lX found outin cp="
FMT_CONN "\n",
__func__, ct, ct->status, ARG_CONN(cp));
new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
IP_VS_DBG_BUF(7, "%s: ct=%p before alter: reply tuple="
FMT_TUPLE "\n",
__func__, ct, ARG_TUPLE(&new_reply));
new_reply.src.u3 = cp->daddr;
new_reply.src.u.tcp.port = cp->dport;
goto alter;
}
IP_VS_DBG_BUF(7, "%s: ct=%p, status=0x%lX, tuple=" FMT_TUPLE
" - unknown expect\n",
__func__, ct, ct->status, ARG_TUPLE(orig));
return;
alter:
/* Never alter conntrack for non-NAT conns */
if (IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ)
nf_conntrack_alter_reply(ct, &new_reply);
ip_vs_conn_put(cp);
return;
}
/*
* Create NF conntrack expectation with wildcard (optional) source port.
* Then the default callback function will alter the reply and will confirm
* the conntrack entry when the first packet comes.
* Use port 0 to expect connection from any port.
*/
void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
struct ip_vs_conn *cp, u_int8_t proto,
const __be16 port, int from_rs)
{
struct nf_conntrack_expect *exp;
if (ct == NULL)
return;
exp = nf_ct_expect_alloc(ct);
if (!exp)
return;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
from_rs ? &cp->daddr : &cp->caddr,
from_rs ? &cp->caddr : &cp->vaddr,
proto, port ? &port : NULL,
from_rs ? &cp->cport : &cp->vport);
exp->expectfn = ip_vs_nfct_expect_callback;
IP_VS_DBG_BUF(7, "%s: ct=%p, expect tuple=" FMT_TUPLE "\n",
__func__, ct, ARG_TUPLE(&exp->tuple));
nf_ct_expect_related(exp, 0);
nf_ct_expect_put(exp);
}
EXPORT_SYMBOL(ip_vs_nfct_expect_related);
/*
* Our connection was terminated, try to drop the conntrack immediately
*/
void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conntrack_tuple tuple;
if (!cp->cport)
return;
tuple = (struct nf_conntrack_tuple) {
.dst = { .protonum = cp->protocol, .dir = IP_CT_DIR_ORIGINAL } };
tuple.src.u3 = cp->caddr;
tuple.src.u.all = cp->cport;
tuple.src.l3num = cp->af;
tuple.dst.u3 = cp->vaddr;
tuple.dst.u.all = cp->vport;
IP_VS_DBG_BUF(7, "%s: dropping conntrack for conn " FMT_CONN "\n",
__func__, ARG_CONN(cp));
h = nf_conntrack_find_get(cp->ipvs->net, &nf_ct_zone_dflt, &tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_kill(ct)) {
IP_VS_DBG_BUF(7, "%s: ct=%p deleted for tuple="
FMT_TUPLE "\n",
__func__, ct, ARG_TUPLE(&tuple));
} else {
IP_VS_DBG_BUF(7, "%s: ct=%p, no conntrack for tuple="
FMT_TUPLE "\n",
__func__, ct, ARG_TUPLE(&tuple));
}
nf_ct_put(ct);
} else {
IP_VS_DBG_BUF(7, "%s: no conntrack for tuple=" FMT_TUPLE "\n",
__func__, ARG_TUPLE(&tuple));
}
}
| linux-master | net/netfilter/ipvs/ip_vs_nfct.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Weighted Round-Robin Scheduling module
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes:
* Wensong Zhang : changed the ip_vs_wrr_schedule to return dest
 * Wensong Zhang : changed some cosmetic things for debugging
* Wensong Zhang : changed for the d-linked destination list
* Wensong Zhang : added the ip_vs_wrr_update_svc
* Julian Anastasov : fixed the bug of returning destination
* with weight 0 when all weights are zero
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/gcd.h>
#include <net/ip_vs.h>
/* The WRR algorithm depends on some calculations:
* - mw: maximum weight
* - di: weight step, greatest common divisor from all weights
* - cw: current required weight
* As result, all weights are in the [di..mw] range with a step=di.
*
* First, we start with cw = mw and select dests with weight >= cw.
* Then cw is reduced with di and all dests are checked again.
* Last pass should be with cw = di. We have mw/di passes in total:
*
* pass 1: cw = max weight
* pass 2: cw = max weight - di
* pass 3: cw = max weight - 2 * di
* ...
* last pass: cw = di
*
 * Weights are supposed to be >= di but we run in parallel with
 * weight changes, so it is possible for some dest weight to drop
 * below di, which is bad if it is the only available dest.
*
* So, we modify how mw is calculated, now it is reduced with (di - 1),
* so that last cw is 1 to catch such dests with weight below di:
* pass 1: cw = max weight - (di - 1)
* pass 2: cw = max weight - di - (di - 1)
* pass 3: cw = max weight - 2 * di - (di - 1)
* ...
* last pass: cw = 1
*
*/
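/* Minimal sketch (illustrative only, not used by this module) of the
 * pass logic described above, on a plain weight array. Overload flags,
 * locking and the "no available destination" checks are omitted, and it
 * assumes di and mw are computed as described above with at least one
 * weight >= 1, otherwise this would loop forever. With weights {4, 2}
 * we get di = 2, mw = 4 - (2 - 1) = 3 and the pick sequence
 * A A B A A B ... All names below are made up for the example.
 */
static inline int ip_vs_wrr_sketch_next(const int *weight, int n,
					int *pos, int *cw, int mw, int di)
{
	/* start with *pos = -1 and *cw = mw */
	while (1) {
		if (++*pos >= n) {
			/* wrapped past the end of the list: lower cw */
			*pos = 0;
			*cw -= di;
			if (*cw <= 0)
				*cw = mw;
		}
		if (weight[*pos] >= *cw)
			return *pos;
	}
}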
/*
* current destination pointer for weighted round-robin scheduling
*/
struct ip_vs_wrr_mark {
struct ip_vs_dest *cl; /* current dest or head */
int cw; /* current weight */
int mw; /* maximum weight */
int di; /* decreasing interval */
struct rcu_head rcu_head;
};
static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
int weight;
int g = 0;
list_for_each_entry(dest, &svc->destinations, n_list) {
weight = atomic_read(&dest->weight);
if (weight > 0) {
if (g > 0)
g = gcd(weight, g);
else
g = weight;
}
}
return g ? g : 1;
}
/*
* Get the maximum weight of the service destinations.
*/
static int ip_vs_wrr_max_weight(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
int new_weight, weight = 0;
list_for_each_entry(dest, &svc->destinations, n_list) {
new_weight = atomic_read(&dest->weight);
if (new_weight > weight)
weight = new_weight;
}
return weight;
}
static int ip_vs_wrr_init_svc(struct ip_vs_service *svc)
{
struct ip_vs_wrr_mark *mark;
/*
* Allocate the mark variable for WRR scheduling
*/
mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_KERNEL);
if (mark == NULL)
return -ENOMEM;
mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
mark->di = ip_vs_wrr_gcd_weight(svc);
mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
mark->cw = mark->mw;
svc->sched_data = mark;
return 0;
}
static void ip_vs_wrr_done_svc(struct ip_vs_service *svc)
{
struct ip_vs_wrr_mark *mark = svc->sched_data;
/*
* Release the mark variable
*/
kfree_rcu(mark, rcu_head);
}
static int ip_vs_wrr_dest_changed(struct ip_vs_service *svc,
struct ip_vs_dest *dest)
{
struct ip_vs_wrr_mark *mark = svc->sched_data;
spin_lock_bh(&svc->sched_lock);
mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
mark->di = ip_vs_wrr_gcd_weight(svc);
mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
if (mark->cw > mark->mw || !mark->cw)
mark->cw = mark->mw;
else if (mark->di > 1)
mark->cw = (mark->cw / mark->di) * mark->di + 1;
spin_unlock_bh(&svc->sched_lock);
return 0;
}
/*
* Weighted Round-Robin Scheduling
*/
static struct ip_vs_dest *
ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *last, *stop = NULL;
struct ip_vs_wrr_mark *mark = svc->sched_data;
bool last_pass = false, restarted = false;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
spin_lock_bh(&svc->sched_lock);
dest = mark->cl;
/* No available dests? */
if (mark->mw == 0)
goto err_noavail;
last = dest;
/* Stop only after all dests were checked for weight >= 1 (last pass) */
while (1) {
list_for_each_entry_continue_rcu(dest,
&svc->destinations,
n_list) {
if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
atomic_read(&dest->weight) >= mark->cw)
goto found;
if (dest == stop)
goto err_over;
}
mark->cw -= mark->di;
if (mark->cw <= 0) {
mark->cw = mark->mw;
/* Stop if we tried last pass from first dest:
* 1. last_pass: we started checks when cw > di but
* then all dests were checked for w >= 1
* 2. last was head: the first and only traversal
* was for weight >= 1, for all dests.
*/
if (last_pass ||
&last->n_list == &svc->destinations)
goto err_over;
restarted = true;
}
last_pass = mark->cw <= mark->di;
if (last_pass && restarted &&
&last->n_list != &svc->destinations) {
/* First traversal was for w >= 1 but only
* for dests after 'last', now do the same
* for all dests up to 'last'.
*/
stop = last;
}
}
found:
IP_VS_DBG_BUF(6, "WRR: server %s:%u "
"activeconns %d refcnt %d weight %d\n",
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
atomic_read(&dest->activeconns),
refcount_read(&dest->refcnt),
atomic_read(&dest->weight));
mark->cl = dest;
out:
spin_unlock_bh(&svc->sched_lock);
return dest;
err_noavail:
mark->cl = dest;
dest = NULL;
ip_vs_scheduler_err(svc, "no destination available");
goto out;
err_over:
mark->cl = dest;
dest = NULL;
ip_vs_scheduler_err(svc, "no destination available: "
"all destinations are overloaded");
goto out;
}
static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
.name = "wrr",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
.init_service = ip_vs_wrr_init_svc,
.done_service = ip_vs_wrr_done_svc,
.add_dest = ip_vs_wrr_dest_changed,
.del_dest = ip_vs_wrr_dest_changed,
.upd_dest = ip_vs_wrr_dest_changed,
.schedule = ip_vs_wrr_schedule,
};
static int __init ip_vs_wrr_init(void)
{
	return register_ip_vs_scheduler(&ip_vs_wrr_scheduler);
}
static void __exit ip_vs_wrr_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_wrr_scheduler);
synchronize_rcu();
}
module_init(ip_vs_wrr_init);
module_exit(ip_vs_wrr_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_wrr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the NetFilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
 * Version 1 is capable of handling both version 0 and 1 messages.
* Version 0 is the plain old format.
* Note Version 0 receivers will just drop Ver 1 messages.
 * Version 1 is capable of handling IPv6, Persistence data,
* time-outs, and firewall marks.
* In ver.1 "ip_vs_sync_conn_options" will be sent in netw. order.
* Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0
*
* Definitions Message: is a complete datagram
* Sync_conn: is a part of a Message
* Param Data is an option to a Sync_conn.
*
* Authors: Wensong Zhang <[email protected]>
*
* ip_vs_sync: sync connection info from master load balancer to backups
* through multicast
*
* Changes:
* Alexandre Cassen : Added master & backup support at a time.
* Alexandre Cassen : Added SyncID support for incoming sync
* messages filtering.
* Justin Ossevoort : Fix endian problem on sync message size.
* Hans Schillstrom : Added Version 1: i.e. IPv6,
* Persistence support, fwmark and time-out.
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/inetdevice.h>
#include <linux/net.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/igmp.h> /* for ip_mc_join_group */
#include <linux/udp.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
#include <net/ip.h>
#include <net/sock.h>
#include <net/ip_vs.h>
#define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */
#define IP_VS_SYNC_PORT 8848 /* multicast port */
#define SYNC_PROTO_VER 1 /* Protocol version in header */
static struct lock_class_key __ipvs_sync_key;
/*
* IPVS sync connection entry
* Version 0, i.e. original version.
*/
struct ip_vs_sync_conn_v0 {
__u8 reserved;
/* Protocol, addresses and port numbers */
__u8 protocol; /* Which protocol (TCP/UDP) */
__be16 cport;
__be16 vport;
__be16 dport;
__be32 caddr; /* client address */
__be32 vaddr; /* virtual address */
__be32 daddr; /* destination address */
/* Flags and state transition */
__be16 flags; /* status flags */
__be16 state; /* state info */
/* The sequence options start here */
};
struct ip_vs_sync_conn_options {
struct ip_vs_seq in_seq; /* incoming seq. struct */
struct ip_vs_seq out_seq; /* outgoing seq. struct */
};
/*
Sync Connection format (sync_conn)
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Protocol | Ver. | Size |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Flags |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| State | cport |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| vport | dport |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| fwmark |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| timeout (in sec.) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ... |
| IP-Addresses (v4 or v6) |
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Optional Parameters.
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Param. Type | Param. Length | Param. data |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| ... |
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| | Param Type | Param. Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Param data |
| Last Param data should be padded for 32 bit alignment |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
/*
* Type 0, IPv4 sync connection format
*/
struct ip_vs_sync_v4 {
__u8 type;
__u8 protocol; /* Which protocol (TCP/UDP) */
__be16 ver_size; /* Version msb 4 bits */
/* Flags and state transition */
__be32 flags; /* status flags */
__be16 state; /* state info */
/* Protocol, addresses and port numbers */
__be16 cport;
__be16 vport;
__be16 dport;
__be32 fwmark; /* Firewall mark from skb */
__be32 timeout; /* cp timeout */
__be32 caddr; /* client address */
__be32 vaddr; /* virtual address */
__be32 daddr; /* destination address */
/* The sequence options start here */
/* PE data padded to 32bit alignment after seq. options */
};
/*
* Type 2 messages IPv6
*/
struct ip_vs_sync_v6 {
__u8 type;
__u8 protocol; /* Which protocol (TCP/UDP) */
__be16 ver_size; /* Version msb 4 bits */
/* Flags and state transition */
__be32 flags; /* status flags */
__be16 state; /* state info */
/* Protocol, addresses and port numbers */
__be16 cport;
__be16 vport;
__be16 dport;
__be32 fwmark; /* Firewall mark from skb */
__be32 timeout; /* cp timeout */
struct in6_addr caddr; /* client address */
struct in6_addr vaddr; /* virtual address */
struct in6_addr daddr; /* destination address */
/* The sequence options start here */
/* PE data padded to 32bit alignment after seq. options */
};
union ip_vs_sync_conn {
struct ip_vs_sync_v4 v4;
struct ip_vs_sync_v6 v6;
};
/* Bits in Type field in above */
#define STYPE_INET6 0
#define STYPE_F_INET6 (1 << STYPE_INET6)
#define SVER_SHIFT 12 /* Shift to get version */
#define SVER_MASK 0x0fff /* Mask to strip version */
#define IPVS_OPT_SEQ_DATA 1
#define IPVS_OPT_PE_DATA 2
#define IPVS_OPT_PE_NAME 3
#define IPVS_OPT_PARAM 7
#define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1))
#define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1))
#define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1))
#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
struct ip_vs_sync_thread_data {
struct task_struct *task;
struct netns_ipvs *ipvs;
struct socket *sock;
char *buf;
int id;
};
/* Version 0 definition of packet sizes */
#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0))
#define FULL_CONN_SIZE \
(sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
/*
 The master multicasts messages (Datagrams) to the backup load balancers
in the following format.
Version 1:
Note, first byte should be Zero, so ver 0 receivers will drop the packet.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0 | SyncID | Size |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Count Conns | Version | Reserved, set to Zero |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| IPVS Sync Connection (1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| . |
~ . ~
| . |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| IPVS Sync Connection (n) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Version 0 Header
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Count Conns | SyncID | Size |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| IPVS Sync Connection (1) |
*/
/* Version 0 header */
struct ip_vs_sync_mesg_v0 {
__u8 nr_conns;
__u8 syncid;
__be16 size;
/* ip_vs_sync_conn entries start here */
};
/* Version 1 header */
struct ip_vs_sync_mesg {
__u8 reserved; /* must be zero */
__u8 syncid;
__be16 size;
__u8 nr_conns;
__s8 version; /* SYNC_PROTO_VER */
__u16 spare;
/* ip_vs_sync_conn entries start here */
};
union ipvs_sockaddr {
struct sockaddr_in in;
struct sockaddr_in6 in6;
};
struct ip_vs_sync_buff {
struct list_head list;
unsigned long firstuse;
/* pointers for the message data */
struct ip_vs_sync_mesg *mesg;
unsigned char *head;
unsigned char *end;
};
/*
* Copy of struct ip_vs_seq
* From unaligned network order to aligned host order
*/
static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
{
memset(ho, 0, sizeof(*ho));
ho->init_seq = get_unaligned_be32(&no->init_seq);
ho->delta = get_unaligned_be32(&no->delta);
ho->previous_delta = get_unaligned_be32(&no->previous_delta);
}
/*
* Copy of struct ip_vs_seq
* From Aligned host order to unaligned network order
*/
static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
{
put_unaligned_be32(ho->init_seq, &no->init_seq);
put_unaligned_be32(ho->delta, &no->delta);
put_unaligned_be32(ho->previous_delta, &no->previous_delta);
}
static inline struct ip_vs_sync_buff *
sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
{
struct ip_vs_sync_buff *sb;
spin_lock_bh(&ipvs->sync_lock);
if (list_empty(&ms->sync_queue)) {
sb = NULL;
__set_current_state(TASK_INTERRUPTIBLE);
} else {
sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff,
list);
list_del(&sb->list);
ms->sync_queue_len--;
if (!ms->sync_queue_len)
ms->sync_queue_delay = 0;
}
spin_unlock_bh(&ipvs->sync_lock);
return sb;
}
/*
* Create a new sync buffer for Version 1 proto.
*/
static inline struct ip_vs_sync_buff *
ip_vs_sync_buff_create(struct netns_ipvs *ipvs, unsigned int len)
{
struct ip_vs_sync_buff *sb;
if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
return NULL;
len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg),
ipvs->mcfg.sync_maxlen);
sb->mesg = kmalloc(len, GFP_ATOMIC);
if (!sb->mesg) {
kfree(sb);
return NULL;
}
sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */
sb->mesg->version = SYNC_PROTO_VER;
sb->mesg->syncid = ipvs->mcfg.syncid;
sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg));
sb->mesg->nr_conns = 0;
sb->mesg->spare = 0;
sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
sb->end = (unsigned char *)sb->mesg + len;
sb->firstuse = jiffies;
return sb;
}
static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
{
kfree(sb->mesg);
kfree(sb);
}
static inline void sb_queue_tail(struct netns_ipvs *ipvs,
struct ipvs_master_sync_state *ms)
{
struct ip_vs_sync_buff *sb = ms->sync_buff;
spin_lock(&ipvs->sync_lock);
if (ipvs->sync_state & IP_VS_STATE_MASTER &&
ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) {
if (!ms->sync_queue_len)
schedule_delayed_work(&ms->master_wakeup_work,
max(IPVS_SYNC_SEND_DELAY, 1));
ms->sync_queue_len++;
list_add_tail(&sb->list, &ms->sync_queue);
if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
int id = (int)(ms - ipvs->ms);
wake_up_process(ipvs->master_tinfo[id].task);
}
} else
ip_vs_sync_buff_release(sb);
spin_unlock(&ipvs->sync_lock);
}
/*
* Get the current sync buffer if it has been created for more
* than the specified time or the specified time is zero.
*/
static inline struct ip_vs_sync_buff *
get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms,
unsigned long time)
{
struct ip_vs_sync_buff *sb;
spin_lock_bh(&ipvs->sync_buff_lock);
sb = ms->sync_buff;
if (sb && time_after_eq(jiffies - sb->firstuse, time)) {
ms->sync_buff = NULL;
__set_current_state(TASK_RUNNING);
} else
sb = NULL;
spin_unlock_bh(&ipvs->sync_buff_lock);
return sb;
}
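/* Spread connections over the master sync threads by hashing the
 * connection pointer, so a given connection is always handled by the
 * same thread and therefore sent on the same multicast port.
 */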
static inline int
select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp)
{
return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask;
}
/*
* Create a new sync buffer for Version 0 proto.
*/
static inline struct ip_vs_sync_buff *
ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs, unsigned int len)
{
struct ip_vs_sync_buff *sb;
struct ip_vs_sync_mesg_v0 *mesg;
if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
return NULL;
len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg_v0),
ipvs->mcfg.sync_maxlen);
sb->mesg = kmalloc(len, GFP_ATOMIC);
if (!sb->mesg) {
kfree(sb);
return NULL;
}
mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
mesg->nr_conns = 0;
mesg->syncid = ipvs->mcfg.syncid;
mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0));
sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
sb->end = (unsigned char *)mesg + len;
sb->firstuse = jiffies;
return sb;
}
/* Check if connection is controlled by persistence */
static inline bool in_persistence(struct ip_vs_conn *cp)
{
for (cp = cp->control; cp; cp = cp->control) {
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
return true;
}
return false;
}
/* Check if conn should be synced.
* pkts: conn packets, use sysctl_sync_threshold to avoid packet check
* - (1) sync_refresh_period: reduce sync rate. Additionally, retry
* sync_retries times with period of sync_refresh_period/8
* - (2) if both sync_refresh_period and sync_period are 0 send sync only
* for state changes or only once when pkts matches sync_threshold
* - (3) templates: rate can be reduced only with sync_refresh_period or
* with (2)
*/
static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs,
struct ip_vs_conn *cp, int pkts)
{
unsigned long orig = READ_ONCE(cp->sync_endtime);
unsigned long now = jiffies;
unsigned long n = (now + cp->timeout) & ~3UL;
unsigned int sync_refresh_period;
int sync_period;
int force;
/* Check if we sync in current state */
if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE))
force = 0;
else if (unlikely(sysctl_sync_persist_mode(ipvs) && in_persistence(cp)))
return 0;
else if (likely(cp->protocol == IPPROTO_TCP)) {
if (!((1 << cp->state) &
((1 << IP_VS_TCP_S_ESTABLISHED) |
(1 << IP_VS_TCP_S_FIN_WAIT) |
(1 << IP_VS_TCP_S_CLOSE) |
(1 << IP_VS_TCP_S_CLOSE_WAIT) |
(1 << IP_VS_TCP_S_TIME_WAIT))))
return 0;
force = cp->state != cp->old_state;
if (force && cp->state != IP_VS_TCP_S_ESTABLISHED)
goto set;
} else if (unlikely(cp->protocol == IPPROTO_SCTP)) {
if (!((1 << cp->state) &
((1 << IP_VS_SCTP_S_ESTABLISHED) |
(1 << IP_VS_SCTP_S_SHUTDOWN_SENT) |
(1 << IP_VS_SCTP_S_SHUTDOWN_RECEIVED) |
(1 << IP_VS_SCTP_S_SHUTDOWN_ACK_SENT) |
(1 << IP_VS_SCTP_S_CLOSED))))
return 0;
force = cp->state != cp->old_state;
if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED)
goto set;
} else {
/* UDP or another protocol with single state */
force = 0;
}
sync_refresh_period = sysctl_sync_refresh_period(ipvs);
if (sync_refresh_period > 0) {
long diff = n - orig;
long min_diff = max(cp->timeout >> 1, 10UL * HZ);
/* Avoid sync if difference is below sync_refresh_period
* and below the half timeout.
*/
if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) {
int retries = orig & 3;
if (retries >= sysctl_sync_retries(ipvs))
return 0;
if (time_before(now, orig - cp->timeout +
(sync_refresh_period >> 3)))
return 0;
n |= retries + 1;
}
}
sync_period = sysctl_sync_period(ipvs);
if (sync_period > 0) {
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) &&
pkts % sync_period != sysctl_sync_threshold(ipvs))
return 0;
} else if (!sync_refresh_period &&
pkts != sysctl_sync_threshold(ipvs))
return 0;
set:
cp->old_state = cp->state;
n = cmpxchg(&cp->sync_endtime, orig, n);
return n == orig || force;
}
/*
 * Version 0, selected when the sync_version sysctl is set to 0.
 * Add ip_vs_conn information to the current sync_buff.
*/
static void ip_vs_sync_conn_v0(struct netns_ipvs *ipvs, struct ip_vs_conn *cp,
int pkts)
{
struct ip_vs_sync_mesg_v0 *m;
struct ip_vs_sync_conn_v0 *s;
struct ip_vs_sync_buff *buff;
struct ipvs_master_sync_state *ms;
int id;
unsigned int len;
if (unlikely(cp->af != AF_INET))
return;
/* Do not sync ONE PACKET */
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
return;
if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
return;
spin_lock_bh(&ipvs->sync_buff_lock);
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
spin_unlock_bh(&ipvs->sync_buff_lock);
return;
}
id = select_master_thread_id(ipvs, cp);
ms = &ipvs->ms[id];
buff = ms->sync_buff;
len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
SIMPLE_CONN_SIZE;
if (buff) {
m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
/* Send buffer if it is for v1 */
if (buff->head + len > buff->end || !m->nr_conns) {
sb_queue_tail(ipvs, ms);
ms->sync_buff = NULL;
buff = NULL;
}
}
if (!buff) {
buff = ip_vs_sync_buff_create_v0(ipvs, len);
if (!buff) {
spin_unlock_bh(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
return;
}
ms->sync_buff = buff;
}
m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
s = (struct ip_vs_sync_conn_v0 *) buff->head;
/* copy members */
s->reserved = 0;
s->protocol = cp->protocol;
s->cport = cp->cport;
s->vport = cp->vport;
s->dport = cp->dport;
s->caddr = cp->caddr.ip;
s->vaddr = cp->vaddr.ip;
s->daddr = cp->daddr.ip;
s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED);
s->state = htons(cp->state);
if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
struct ip_vs_sync_conn_options *opt =
(struct ip_vs_sync_conn_options *)&s[1];
memcpy(opt, &cp->sync_conn_opt, sizeof(*opt));
}
m->nr_conns++;
m->size = htons(ntohs(m->size) + len);
buff->head += len;
spin_unlock_bh(&ipvs->sync_buff_lock);
/* synchronize its controller if it has */
cp = cp->control;
if (cp) {
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
pkts = atomic_inc_return(&cp->in_pkts);
else
pkts = sysctl_sync_threshold(ipvs);
ip_vs_sync_conn(ipvs, cp, pkts);
}
}
/*
* Add an ip_vs_conn information into the current sync_buff.
* Called by ip_vs_in.
* Sending Version 1 messages
*/
void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts)
{
struct ip_vs_sync_mesg *m;
union ip_vs_sync_conn *s;
struct ip_vs_sync_buff *buff;
struct ipvs_master_sync_state *ms;
int id;
__u8 *p;
unsigned int len, pe_name_len, pad;
/* Handle old version of the protocol */
if (sysctl_sync_ver(ipvs) == 0) {
ip_vs_sync_conn_v0(ipvs, cp, pkts);
return;
}
/* Do not sync ONE PACKET */
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
goto control;
sloop:
if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
goto control;
/* Sanity checks */
pe_name_len = 0;
if (cp->pe_data_len) {
if (!cp->pe_data || !cp->dest) {
IP_VS_ERR_RL("SYNC, connection pe_data invalid\n");
return;
}
pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
}
spin_lock_bh(&ipvs->sync_buff_lock);
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
spin_unlock_bh(&ipvs->sync_buff_lock);
return;
}
id = select_master_thread_id(ipvs, cp);
ms = &ipvs->ms[id];
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
len = sizeof(struct ip_vs_sync_v6);
else
#endif
len = sizeof(struct ip_vs_sync_v4);
if (cp->flags & IP_VS_CONN_F_SEQ_MASK)
len += sizeof(struct ip_vs_sync_conn_options) + 2;
if (cp->pe_data_len)
len += cp->pe_data_len + 2; /* + Param hdr field */
if (pe_name_len)
len += pe_name_len + 2;
/* check if there is a space for this one */
pad = 0;
buff = ms->sync_buff;
if (buff) {
m = buff->mesg;
pad = (4 - (size_t) buff->head) & 3;
/* Send buffer if it is for v0 */
if (buff->head + len + pad > buff->end || m->reserved) {
sb_queue_tail(ipvs, ms);
ms->sync_buff = NULL;
buff = NULL;
pad = 0;
}
}
if (!buff) {
buff = ip_vs_sync_buff_create(ipvs, len);
if (!buff) {
spin_unlock_bh(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
return;
}
ms->sync_buff = buff;
m = buff->mesg;
}
p = buff->head;
buff->head += pad + len;
m->size = htons(ntohs(m->size) + pad + len);
/* Add ev. padding from prev. sync_conn */
while (pad--)
*(p++) = 0;
s = (union ip_vs_sync_conn *)p;
/* Set message type & copy members */
s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0);
s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */
s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED);
s->v4.state = htons(cp->state);
s->v4.protocol = cp->protocol;
s->v4.cport = cp->cport;
s->v4.vport = cp->vport;
s->v4.dport = cp->dport;
s->v4.fwmark = htonl(cp->fwmark);
s->v4.timeout = htonl(cp->timeout / HZ);
m->nr_conns++;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6) {
p += sizeof(struct ip_vs_sync_v6);
s->v6.caddr = cp->caddr.in6;
s->v6.vaddr = cp->vaddr.in6;
s->v6.daddr = cp->daddr.in6;
} else
#endif
{
p += sizeof(struct ip_vs_sync_v4); /* options ptr */
s->v4.caddr = cp->caddr.ip;
s->v4.vaddr = cp->vaddr.ip;
s->v4.daddr = cp->daddr.ip;
}
if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
*(p++) = IPVS_OPT_SEQ_DATA;
*(p++) = sizeof(struct ip_vs_sync_conn_options);
hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
p += sizeof(struct ip_vs_seq);
hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
p += sizeof(struct ip_vs_seq);
}
/* Handle pe data */
if (cp->pe_data_len && cp->pe_data) {
*(p++) = IPVS_OPT_PE_DATA;
*(p++) = cp->pe_data_len;
memcpy(p, cp->pe_data, cp->pe_data_len);
p += cp->pe_data_len;
if (pe_name_len) {
/* Add PE_NAME */
*(p++) = IPVS_OPT_PE_NAME;
*(p++) = pe_name_len;
memcpy(p, cp->pe->name, pe_name_len);
p += pe_name_len;
}
}
spin_unlock_bh(&ipvs->sync_buff_lock);
control:
/* synchronize its controller if it has */
cp = cp->control;
if (!cp)
return;
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
pkts = atomic_inc_return(&cp->in_pkts);
else
pkts = sysctl_sync_threshold(ipvs);
goto sloop;
}
/*
* fill_param used by version 1
*/
static inline int
ip_vs_conn_fill_param_sync(struct netns_ipvs *ipvs, int af, union ip_vs_sync_conn *sc,
struct ip_vs_conn_param *p,
__u8 *pe_data, unsigned int pe_data_len,
__u8 *pe_name, unsigned int pe_name_len)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ip_vs_conn_fill_param(ipvs, af, sc->v6.protocol,
(const union nf_inet_addr *)&sc->v6.caddr,
sc->v6.cport,
(const union nf_inet_addr *)&sc->v6.vaddr,
sc->v6.vport, p);
else
#endif
ip_vs_conn_fill_param(ipvs, af, sc->v4.protocol,
(const union nf_inet_addr *)&sc->v4.caddr,
sc->v4.cport,
(const union nf_inet_addr *)&sc->v4.vaddr,
sc->v4.vport, p);
/* Handle pe data */
if (pe_data_len) {
if (pe_name_len) {
char buff[IP_VS_PENAME_MAXLEN+1];
memcpy(buff, pe_name, pe_name_len);
buff[pe_name_len]=0;
p->pe = __ip_vs_pe_getbyname(buff);
if (!p->pe) {
IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n",
buff);
return 1;
}
} else {
IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
return 1;
}
p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC);
if (!p->pe_data) {
module_put(p->pe->module);
return -ENOMEM;
}
p->pe_data_len = pe_data_len;
}
return 0;
}
/*
* Connection Add / Update.
* Common for version 0 and 1 reception of backup sync_conns.
* Param: ...
 *        timeout is in seconds.
*/
static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *param,
unsigned int flags, unsigned int state,
unsigned int protocol, unsigned int type,
const union nf_inet_addr *daddr, __be16 dport,
unsigned long timeout, __u32 fwmark,
struct ip_vs_sync_conn_options *opt)
{
struct ip_vs_dest *dest;
struct ip_vs_conn *cp;
if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
cp = ip_vs_conn_in_get(param);
if (cp && ((cp->dport != dport) ||
!ip_vs_addr_equal(cp->daf, &cp->daddr, daddr))) {
if (!(flags & IP_VS_CONN_F_INACTIVE)) {
ip_vs_conn_expire_now(cp);
__ip_vs_conn_put(cp);
cp = NULL;
} else {
/* This is the expiration message for the
* connection that was already replaced, so we
* just ignore it.
*/
__ip_vs_conn_put(cp);
kfree(param->pe_data);
return;
}
}
} else {
cp = ip_vs_ct_in_get(param);
}
if (cp) {
/* Free pe_data */
kfree(param->pe_data);
dest = cp->dest;
spin_lock_bh(&cp->lock);
if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE &&
!(flags & IP_VS_CONN_F_TEMPLATE) && dest) {
if (flags & IP_VS_CONN_F_INACTIVE) {
atomic_dec(&dest->activeconns);
atomic_inc(&dest->inactconns);
} else {
atomic_inc(&dest->activeconns);
atomic_dec(&dest->inactconns);
}
}
flags &= IP_VS_CONN_F_BACKUP_UPD_MASK;
flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK;
cp->flags = flags;
spin_unlock_bh(&cp->lock);
if (!dest)
ip_vs_try_bind_dest(cp);
} else {
/*
* Find the appropriate destination for the connection.
* If it is not found the connection will remain unbound
* but still handled.
*/
rcu_read_lock();
/* This function is only invoked by the synchronization
* code. We do not currently support heterogeneous pools
* with synchronization, so we can make the assumption that
* the svc_af is the same as the dest_af
*/
dest = ip_vs_find_dest(ipvs, type, type, daddr, dport,
param->vaddr, param->vport, protocol,
fwmark, flags);
cp = ip_vs_conn_new(param, type, daddr, dport, flags, dest,
fwmark);
rcu_read_unlock();
if (!cp) {
kfree(param->pe_data);
IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
return;
}
if (!(flags & IP_VS_CONN_F_TEMPLATE))
kfree(param->pe_data);
}
if (opt) {
cp->in_seq = opt->in_seq;
cp->out_seq = opt->out_seq;
}
atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
cp->state = state;
cp->old_state = cp->state;
/*
 * For Version 0 messages:
 *  - not possible to recover the right timeout for templates
 *  - cannot find the right fwmark virtual service; if needed, this
 *    could be done for non-fwmark persistent services.
 * Version 1 messages carry the timeout, so there is no problem.
*/
if (timeout) {
if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
timeout = MAX_SCHEDULE_TIMEOUT / HZ;
cp->timeout = timeout*HZ;
} else {
struct ip_vs_proto_data *pd;
pd = ip_vs_proto_data_get(ipvs, protocol);
if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
cp->timeout = pd->timeout_table[state];
else
cp->timeout = (3*60*HZ);
}
ip_vs_conn_put(cp);
}
/*
* Process received multicast message for Version 0
*/
static void ip_vs_process_message_v0(struct netns_ipvs *ipvs, const char *buffer,
const size_t buflen)
{
struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
struct ip_vs_sync_conn_v0 *s;
struct ip_vs_sync_conn_options *opt;
struct ip_vs_protocol *pp;
struct ip_vs_conn_param param;
char *p;
int i;
p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
for (i=0; i<m->nr_conns; i++) {
unsigned int flags, state;
if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
return;
}
s = (struct ip_vs_sync_conn_v0 *) p;
flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
flags &= ~IP_VS_CONN_F_HASHED;
if (flags & IP_VS_CONN_F_SEQ_MASK) {
opt = (struct ip_vs_sync_conn_options *)&s[1];
p += FULL_CONN_SIZE;
if (p > buffer+buflen) {
IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n");
return;
}
} else {
opt = NULL;
p += SIMPLE_CONN_SIZE;
}
state = ntohs(s->state);
if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
pp = ip_vs_proto_get(s->protocol);
if (!pp) {
IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
s->protocol);
continue;
}
if (state >= pp->num_states) {
IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
pp->name, state);
continue;
}
} else {
if (state >= IP_VS_CTPL_S_LAST)
IP_VS_DBG(7, "BACKUP v0, Invalid tpl state %u\n",
state);
}
ip_vs_conn_fill_param(ipvs, AF_INET, s->protocol,
(const union nf_inet_addr *)&s->caddr,
s->cport,
(const union nf_inet_addr *)&s->vaddr,
s->vport, ¶m);
/* Send timeout as Zero */
ip_vs_proc_conn(ipvs, ¶m, flags, state, s->protocol, AF_INET,
(union nf_inet_addr *)&s->daddr, s->dport,
0, 0, opt);
}
}
/*
* Handle options
*/
static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
__u32 *opt_flags,
struct ip_vs_sync_conn_options *opt)
{
struct ip_vs_sync_conn_options *topt;
topt = (struct ip_vs_sync_conn_options *)p;
if (plen != sizeof(struct ip_vs_sync_conn_options)) {
IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
return -EINVAL;
}
if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
IP_VS_DBG(2, "BACKUP, conn options found twice\n");
return -EINVAL;
}
ntoh_seq(&topt->in_seq, &opt->in_seq);
ntoh_seq(&topt->out_seq, &opt->out_seq);
*opt_flags |= IPVS_OPT_F_SEQ_DATA;
return 0;
}
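/* Validate a length-delimited option (PE data or PE name) and record a
 * pointer to it, rejecting overlong or duplicate options.
 */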
static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
__u8 **data, unsigned int maxlen,
__u32 *opt_flags, __u32 flag)
{
if (plen > maxlen) {
IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
return -EINVAL;
}
if (*opt_flags & flag) {
IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
return -EINVAL;
}
*data_len = plen;
*data = p;
*opt_flags |= flag;
return 0;
}
/*
* Process a Version 1 sync. connection
*/
static inline int ip_vs_proc_sync_conn(struct netns_ipvs *ipvs, __u8 *p, __u8 *msg_end)
{
struct ip_vs_sync_conn_options opt;
union ip_vs_sync_conn *s;
struct ip_vs_protocol *pp;
struct ip_vs_conn_param param;
__u32 flags;
unsigned int af, state, pe_data_len=0, pe_name_len=0;
__u8 *pe_data=NULL, *pe_name=NULL;
__u32 opt_flags=0;
int retc=0;
s = (union ip_vs_sync_conn *) p;
if (s->v6.type & STYPE_F_INET6) {
#ifdef CONFIG_IP_VS_IPV6
af = AF_INET6;
p += sizeof(struct ip_vs_sync_v6);
#else
IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n");
retc = 10;
goto out;
#endif
} else if (!s->v4.type) {
af = AF_INET;
p += sizeof(struct ip_vs_sync_v4);
} else {
return -10;
}
if (p > msg_end)
return -20;
/* Process optional params, checking Type & Len of each */
while (p < msg_end) {
int ptype;
int plen;
if (p+2 > msg_end)
return -30;
ptype = *(p++);
plen = *(p++);
if (!plen || ((p + plen) > msg_end))
return -40;
/* Handle seq option p = param data */
switch (ptype & ~IPVS_OPT_F_PARAM) {
case IPVS_OPT_SEQ_DATA:
if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
return -50;
break;
case IPVS_OPT_PE_DATA:
if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
IP_VS_PEDATA_MAXLEN, &opt_flags,
IPVS_OPT_F_PE_DATA))
return -60;
break;
case IPVS_OPT_PE_NAME:
if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name,
IP_VS_PENAME_MAXLEN, &opt_flags,
IPVS_OPT_F_PE_NAME))
return -70;
break;
default:
/* Param data mandatory ? */
if (!(ptype & IPVS_OPT_F_PARAM)) {
IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
ptype & ~IPVS_OPT_F_PARAM);
retc = 20;
goto out;
}
}
p += plen; /* Next option */
}
/* Get flags and Mask off unsupported */
flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
flags |= IP_VS_CONN_F_SYNC;
state = ntohs(s->v4.state);
if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
pp = ip_vs_proto_get(s->v4.protocol);
if (!pp) {
IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n",
s->v4.protocol);
retc = 30;
goto out;
}
if (state >= pp->num_states) {
IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
pp->name, state);
retc = 40;
goto out;
}
} else {
if (state >= IP_VS_CTPL_S_LAST)
IP_VS_DBG(7, "BACKUP, Invalid tpl state %u\n",
state);
}
if (ip_vs_conn_fill_param_sync(ipvs, af, s, ¶m, pe_data,
pe_data_len, pe_name, pe_name_len)) {
retc = 50;
goto out;
}
/* Without IPv6 support, IPv6 entries are silently skipped */
if (af == AF_INET)
ip_vs_proc_conn(ipvs, ¶m, flags, state, s->v4.protocol, af,
(union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
);
#ifdef CONFIG_IP_VS_IPV6
else
ip_vs_proc_conn(ipvs, ¶m, flags, state, s->v6.protocol, af,
(union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
);
#endif
ip_vs_pe_put(param.pe);
return 0;
/* Error exit */
out:
IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
return retc;
}
/*
* Process received multicast message and create the corresponding
* ip_vs_conn entries.
* Handles Version 0 & 1
*/
static void ip_vs_process_message(struct netns_ipvs *ipvs, __u8 *buffer,
const size_t buflen)
{
struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
__u8 *p, *msg_end;
int i, nr_conns;
if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) {
IP_VS_DBG(2, "BACKUP, message header too short\n");
return;
}
if (buflen != ntohs(m2->size)) {
IP_VS_DBG(2, "BACKUP, bogus message size\n");
return;
}
/* SyncID sanity check */
if (ipvs->bcfg.syncid != 0 && m2->syncid != ipvs->bcfg.syncid) {
IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
return;
}
/* Handle version 1 message */
if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
&& (m2->spare == 0)) {
msg_end = buffer + sizeof(struct ip_vs_sync_mesg);
nr_conns = m2->nr_conns;
for (i=0; i<nr_conns; i++) {
union ip_vs_sync_conn *s;
unsigned int size;
int retc;
p = msg_end;
if (p + sizeof(s->v4) > buffer+buflen) {
IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
return;
}
s = (union ip_vs_sync_conn *)p;
size = ntohs(s->v4.ver_size) & SVER_MASK;
msg_end = p + size;
/* Basic sanity checks */
if (msg_end > buffer+buflen) {
IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
return;
}
if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
ntohs(s->v4.ver_size) >> SVER_SHIFT);
return;
}
/* Process a single sync_conn */
retc = ip_vs_proc_sync_conn(ipvs, p, msg_end);
if (retc < 0) {
IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
retc);
return;
}
/* Make sure we have 32 bit alignment */
msg_end = p + ((size + 3) & ~3);
}
} else {
/* Old type of message */
ip_vs_process_message_v0(ipvs, buffer, buflen);
return;
}
}
/*
* Setup sndbuf (mode=1) or rcvbuf (mode=0)
*/
static void set_sock_size(struct sock *sk, int mode, int val)
{
/* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */
/* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */
lock_sock(sk);
if (mode) {
val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
READ_ONCE(sysctl_wmem_max));
sk->sk_sndbuf = val * 2;
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
} else {
val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
READ_ONCE(sysctl_rmem_max));
sk->sk_rcvbuf = val * 2;
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
release_sock(sk);
}
/*
* Setup loopback of outgoing multicasts on a sending socket
*/
static void set_mcast_loop(struct sock *sk, u_char loop)
{
/* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
lock_sock(sk);
inet_assign_bit(MC_LOOP, sk, loop);
#ifdef CONFIG_IP_VS_IPV6
if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
/* IPV6_MULTICAST_LOOP */
np->mc_loop = loop ? 1 : 0;
}
#endif
release_sock(sk);
}
/*
* Specify TTL for outgoing multicasts on a sending socket
*/
static void set_mcast_ttl(struct sock *sk, u_char ttl)
{
struct inet_sock *inet = inet_sk(sk);
/* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
lock_sock(sk);
inet->mc_ttl = ttl;
#ifdef CONFIG_IP_VS_IPV6
if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
/* IPV6_MULTICAST_HOPS */
np->mcast_hops = ttl;
}
#endif
release_sock(sk);
}
/* Control fragmentation of messages */
static void set_mcast_pmtudisc(struct sock *sk, int val)
{
struct inet_sock *inet = inet_sk(sk);
/* setsockopt(sock, SOL_IP, IP_MTU_DISCOVER, &val, sizeof(val)); */
lock_sock(sk);
inet->pmtudisc = val;
#ifdef CONFIG_IP_VS_IPV6
if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
/* IPV6_MTU_DISCOVER */
np->pmtudisc = val;
}
#endif
release_sock(sk);
}
/*
 * Specify the default interface for outgoing multicasts
*/
static int set_mcast_if(struct sock *sk, struct net_device *dev)
{
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
lock_sock(sk);
inet->mc_index = dev->ifindex;
/* inet->mc_addr = 0; */
#ifdef CONFIG_IP_VS_IPV6
if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
/* IPV6_MULTICAST_IF */
np->mcast_oif = dev->ifindex;
}
#endif
release_sock(sk);
return 0;
}
/*
* Join a multicast group.
 * the group is specified by a class D multicast address (224.0.0.0/4)
* in the in_addr structure passed in as a parameter.
*/
static int
join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
{
struct ip_mreqn mreq;
int ret;
memset(&mreq, 0, sizeof(mreq));
memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
mreq.imr_ifindex = dev->ifindex;
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
return ret;
}
#ifdef CONFIG_IP_VS_IPV6
static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
struct net_device *dev)
{
int ret;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
lock_sock(sk);
ret = ipv6_sock_mc_join(sk, dev->ifindex, addr);
release_sock(sk);
return ret;
}
#endif
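/* Bind the sending socket to an address of the multicast interface so
 * that outgoing sync packets are sourced from that interface.
 */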
static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
{
__be32 addr;
struct sockaddr_in sin;
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
if (!addr)
pr_err("You probably need to specify IP address on "
"multicast interface.\n");
IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
dev->name, &addr);
/* Now bind the socket with the address of multicast interface */
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = addr;
sin.sin_port = 0;
return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
}
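/* Build the socket address of the configured multicast group.  Each
 * sync thread id uses its own port, mcast_port + id.
 */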
static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
struct ipvs_sync_daemon_cfg *c, int id)
{
if (AF_INET6 == c->mcast_af) {
sa->in6 = (struct sockaddr_in6) {
.sin6_family = AF_INET6,
.sin6_port = htons(c->mcast_port + id),
};
sa->in6.sin6_addr = c->mcast_group.in6;
*salen = sizeof(sa->in6);
} else {
sa->in = (struct sockaddr_in) {
.sin_family = AF_INET,
.sin_port = htons(c->mcast_port + id),
};
sa->in.sin_addr = c->mcast_group.in;
*salen = sizeof(sa->in);
}
}
/*
* Set up sending multicast socket over UDP
*/
static int make_send_sock(struct netns_ipvs *ipvs, int id,
struct net_device *dev, struct socket **sock_ret)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
struct socket *sock;
int result, salen;
/* First create a socket */
result = sock_create_kern(ipvs->net, ipvs->mcfg.mcast_af, SOCK_DGRAM,
IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
goto error;
}
*sock_ret = sock;
result = set_mcast_if(sock->sk, dev);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
goto error;
}
set_mcast_loop(sock->sk, 0);
set_mcast_ttl(sock->sk, ipvs->mcfg.mcast_ttl);
/* Allow fragmentation if MTU changes */
set_mcast_pmtudisc(sock->sk, IP_PMTUDISC_DONT);
result = sysctl_sync_sock_size(ipvs);
if (result > 0)
set_sock_size(sock->sk, 1, result);
if (AF_INET == ipvs->mcfg.mcast_af)
result = bind_mcastif_addr(sock, dev);
else
result = 0;
if (result < 0) {
pr_err("Error binding address of the mcast interface\n");
goto error;
}
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
salen, 0);
if (result < 0) {
pr_err("Error connecting to the multicast addr\n");
goto error;
}
return 0;
error:
return result;
}
/*
* Set up receiving multicast socket over UDP
*/
static int make_receive_sock(struct netns_ipvs *ipvs, int id,
struct net_device *dev, struct socket **sock_ret)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
struct socket *sock;
int result, salen;
/* First create a socket */
result = sock_create_kern(ipvs->net, ipvs->bcfg.mcast_af, SOCK_DGRAM,
IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
goto error;
}
*sock_ret = sock;
/* it is equivalent to the REUSEADDR option in user-space */
sock->sk->sk_reuse = SK_CAN_REUSE;
result = sysctl_sync_sock_size(ipvs);
if (result > 0)
set_sock_size(sock->sk, 0, result);
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
sock->sk->sk_bound_dev_if = dev->ifindex;
result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
goto error;
}
/* join the multicast group */
#ifdef CONFIG_IP_VS_IPV6
if (ipvs->bcfg.mcast_af == AF_INET6)
result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
dev);
else
#endif
result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
dev);
if (result < 0) {
pr_err("Error joining to the multicast group\n");
goto error;
}
return 0;
error:
return result;
}
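/* Send one sync message on the mcast socket without blocking; the
 * caller retries when the socket becomes writeable again.
 */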
static int
ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
{
struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
struct kvec iov;
int len;
iov.iov_base = (void *)buffer;
iov.iov_len = length;
len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length));
return len;
}
static int
ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
{
int msize;
int ret;
msize = ntohs(msg->size);
ret = ip_vs_send_async(sock, (char *)msg, msize);
if (ret >= 0 || ret == -EAGAIN)
return ret;
pr_err("ip_vs_send_async error %d\n", ret);
return 0;
}
static int
ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
{
struct msghdr msg = {NULL,};
struct kvec iov = {buffer, buflen};
int len;
/* Receive a packet */
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, buflen);
len = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
if (len < 0)
return len;
return len;
}
/* Wakeup the master thread for sending */
static void master_wakeup_work_handler(struct work_struct *work)
{
struct ipvs_master_sync_state *ms =
container_of(work, struct ipvs_master_sync_state,
master_wakeup_work.work);
struct netns_ipvs *ipvs = ms->ipvs;
spin_lock_bh(&ipvs->sync_lock);
if (ms->sync_queue_len &&
ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
int id = (int)(ms - ipvs->ms);
ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
wake_up_process(ipvs->master_tinfo[id].task);
}
spin_unlock_bh(&ipvs->sync_lock);
}
/* Get next buffer to send */
static inline struct ip_vs_sync_buff *
next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
{
struct ip_vs_sync_buff *sb;
sb = sb_dequeue(ipvs, ms);
if (sb)
return sb;
/* Do not delay entries in buffer for more than 2 seconds */
return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME);
}
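/* Master sync daemon: take filled buffers from the queue (or flush the
 * current one after IPVS_SYNC_FLUSH_TIME) and multicast them, sleeping
 * while nothing is pending or the socket is not writeable.
 */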
static int sync_thread_master(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
struct netns_ipvs *ipvs = tinfo->ipvs;
struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id];
struct sock *sk = tinfo->sock->sk;
struct ip_vs_sync_buff *sb;
pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
"syncid = %d, id = %d\n",
ipvs->mcfg.mcast_ifn, ipvs->mcfg.syncid, tinfo->id);
for (;;) {
sb = next_sync_buff(ipvs, ms);
if (unlikely(kthread_should_stop()))
break;
if (!sb) {
schedule_timeout(IPVS_SYNC_CHECK_PERIOD);
continue;
}
while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
/* (Ab)use interruptible sleep to avoid increasing
* the load avg.
*/
__wait_event_interruptible(*sk_sleep(sk),
sock_writeable(sk) ||
kthread_should_stop());
if (unlikely(kthread_should_stop()))
goto done;
}
ip_vs_sync_buff_release(sb);
}
done:
__set_current_state(TASK_RUNNING);
if (sb)
ip_vs_sync_buff_release(sb);
/* clean up the sync_buff queue */
while ((sb = sb_dequeue(ipvs, ms)))
ip_vs_sync_buff_release(sb);
__set_current_state(TASK_RUNNING);
/* clean up the current sync_buff */
sb = get_curr_sync_buff(ipvs, ms, 0);
if (sb)
ip_vs_sync_buff_release(sb);
return 0;
}
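/* Backup sync daemon: wait for sync datagrams on the mcast socket and
 * feed them to ip_vs_process_message() to create/update connections.
 */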
static int sync_thread_backup(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
struct netns_ipvs *ipvs = tinfo->ipvs;
struct sock *sk = tinfo->sock->sk;
struct udp_sock *up = udp_sk(sk);
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
"syncid = %d, id = %d\n",
ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id);
while (!kthread_should_stop()) {
wait_event_interruptible(*sk_sleep(sk),
!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
!skb_queue_empty_lockless(&up->reader_queue) ||
kthread_should_stop());
/* do we have data now? */
while (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
!skb_queue_empty_lockless(&up->reader_queue)) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
ipvs->bcfg.sync_maxlen);
if (len <= 0) {
if (len != -EAGAIN)
pr_err("receiving message error\n");
break;
}
ip_vs_process_message(ipvs, tinfo->buf, len);
}
}
return 0;
}
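/* Start the master or backup sync daemon: resolve the mcast interface,
 * fill in defaults for the daemon config, create one socket and kthread
 * per sync port and publish the new state under sync_mutex.
 */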
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int state)
{
struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
struct task_struct *task;
struct net_device *dev;
char *name;
int (*threadfn)(void *data);
int id = 0, count, hlen;
int result = -ENOMEM;
u16 mtu, min_mtu;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
sizeof(struct ip_vs_sync_conn_v0));
/* increase the module use count */
if (!ip_vs_use_count_inc())
return -ENOPROTOOPT;
/* Do not hold one mutex and then block on another */
for (;;) {
rtnl_lock();
if (mutex_trylock(&ipvs->sync_mutex))
break;
rtnl_unlock();
mutex_lock(&ipvs->sync_mutex);
if (rtnl_trylock())
break;
mutex_unlock(&ipvs->sync_mutex);
}
if (!ipvs->sync_state) {
count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
ipvs->threads_mask = count - 1;
} else
count = ipvs->threads_mask + 1;
if (c->mcast_af == AF_UNSPEC) {
c->mcast_af = AF_INET;
c->mcast_group.ip = cpu_to_be32(IP_VS_SYNC_GROUP);
}
if (!c->mcast_port)
c->mcast_port = IP_VS_SYNC_PORT;
if (!c->mcast_ttl)
c->mcast_ttl = 1;
dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
if (!dev) {
pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
result = -ENODEV;
goto out_early;
}
hlen = (AF_INET6 == c->mcast_af) ?
sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
sizeof(struct iphdr) + sizeof(struct udphdr);
mtu = (state == IP_VS_STATE_BACKUP) ?
clamp(dev->mtu, 1500U, 65535U) : 1500U;
min_mtu = (state == IP_VS_STATE_BACKUP) ? 1024 : 1;
if (c->sync_maxlen)
c->sync_maxlen = clamp_t(unsigned int,
c->sync_maxlen, min_mtu,
65535 - hlen);
else
c->sync_maxlen = mtu - hlen;
if (state == IP_VS_STATE_MASTER) {
result = -EEXIST;
if (ipvs->ms)
goto out_early;
ipvs->mcfg = *c;
name = "ipvs-m:%d:%d";
threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) {
result = -EEXIST;
if (ipvs->backup_tinfo)
goto out_early;
ipvs->bcfg = *c;
name = "ipvs-b:%d:%d";
threadfn = sync_thread_backup;
} else {
result = -EINVAL;
goto out_early;
}
if (state == IP_VS_STATE_MASTER) {
struct ipvs_master_sync_state *ms;
result = -ENOMEM;
ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
if (!ipvs->ms)
goto out;
ms = ipvs->ms;
for (id = 0; id < count; id++, ms++) {
INIT_LIST_HEAD(&ms->sync_queue);
ms->sync_queue_len = 0;
ms->sync_queue_delay = 0;
INIT_DELAYED_WORK(&ms->master_wakeup_work,
master_wakeup_work_handler);
ms->ipvs = ipvs;
}
}
result = -ENOMEM;
ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
GFP_KERNEL);
if (!ti)
goto out;
for (id = 0; id < count; id++) {
tinfo = &ti[id];
tinfo->ipvs = ipvs;
if (state == IP_VS_STATE_BACKUP) {
result = -ENOMEM;
tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL);
if (!tinfo->buf)
goto out;
}
tinfo->id = id;
if (state == IP_VS_STATE_MASTER)
result = make_send_sock(ipvs, id, dev, &tinfo->sock);
else
result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
if (result < 0)
goto out;
task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
if (IS_ERR(task)) {
result = PTR_ERR(task);
goto out;
}
tinfo->task = task;
}
/* mark as active */
if (state == IP_VS_STATE_MASTER)
ipvs->master_tinfo = ti;
else
ipvs->backup_tinfo = ti;
spin_lock_bh(&ipvs->sync_buff_lock);
ipvs->sync_state |= state;
spin_unlock_bh(&ipvs->sync_buff_lock);
mutex_unlock(&ipvs->sync_mutex);
rtnl_unlock();
return 0;
out:
/* We do not need RTNL lock anymore, release it here so that
* sock_release below can use rtnl_lock to leave the mcast group.
*/
rtnl_unlock();
id = min(id, count - 1);
if (ti) {
for (tinfo = ti + id; tinfo >= ti; tinfo--) {
if (tinfo->task)
kthread_stop(tinfo->task);
}
}
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
kfree(ipvs->ms);
ipvs->ms = NULL;
}
mutex_unlock(&ipvs->sync_mutex);
/* No more mutexes, release socks */
if (ti) {
for (tinfo = ti + id; tinfo >= ti; tinfo--) {
if (tinfo->sock)
sock_release(tinfo->sock);
kfree(tinfo->buf);
}
kfree(ti);
}
/* decrease the module use count */
ip_vs_use_count_dec();
return result;
out_early:
mutex_unlock(&ipvs->sync_mutex);
rtnl_unlock();
/* decrease the module use count */
ip_vs_use_count_dec();
return result;
}
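/* Stop all master or backup sync threads and release their sockets.
 * Returns -ESRCH if no such daemon is running.
 */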
int stop_sync_thread(struct netns_ipvs *ipvs, int state)
{
struct ip_vs_sync_thread_data *ti, *tinfo;
int id;
int retc = -EINVAL;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
mutex_lock(&ipvs->sync_mutex);
if (state == IP_VS_STATE_MASTER) {
retc = -ESRCH;
if (!ipvs->ms)
goto err;
ti = ipvs->master_tinfo;
/*
* The lock synchronizes with sb_queue_tail(), so that we don't
* add sync buffers to the queue, when we are already in
* progress of stopping the master sync daemon.
*/
spin_lock_bh(&ipvs->sync_buff_lock);
spin_lock(&ipvs->sync_lock);
ipvs->sync_state &= ~IP_VS_STATE_MASTER;
spin_unlock(&ipvs->sync_lock);
spin_unlock_bh(&ipvs->sync_buff_lock);
retc = 0;
for (id = ipvs->threads_mask; id >= 0; id--) {
struct ipvs_master_sync_state *ms = &ipvs->ms[id];
int ret;
tinfo = &ti[id];
pr_info("stopping master sync thread %d ...\n",
task_pid_nr(tinfo->task));
cancel_delayed_work_sync(&ms->master_wakeup_work);
ret = kthread_stop(tinfo->task);
if (retc >= 0)
retc = ret;
}
kfree(ipvs->ms);
ipvs->ms = NULL;
ipvs->master_tinfo = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
retc = -ESRCH;
if (!ipvs->backup_tinfo)
goto err;
ti = ipvs->backup_tinfo;
ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
retc = 0;
for (id = ipvs->threads_mask; id >= 0; id--) {
int ret;
tinfo = &ti[id];
pr_info("stopping backup sync thread %d ...\n",
task_pid_nr(tinfo->task));
ret = kthread_stop(tinfo->task);
if (retc >= 0)
retc = ret;
}
ipvs->backup_tinfo = NULL;
} else {
goto err;
}
id = ipvs->threads_mask;
mutex_unlock(&ipvs->sync_mutex);
/* No more mutexes, release socks */
for (tinfo = ti + id; tinfo >= ti; tinfo--) {
if (tinfo->sock)
sock_release(tinfo->sock);
kfree(tinfo->buf);
}
kfree(ti);
/* decrease the module use count */
ip_vs_use_count_dec();
return retc;
err:
mutex_unlock(&ipvs->sync_mutex);
return retc;
}
/*
* Initialize data struct for each netns
*/
int __net_init ip_vs_sync_net_init(struct netns_ipvs *ipvs)
{
__mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
spin_lock_init(&ipvs->sync_lock);
spin_lock_init(&ipvs->sync_buff_lock);
return 0;
}
void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
{
int retc;
retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Master Daemon\n");
retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Backup Daemon\n");
}
| linux-master | net/netfilter/ipvs/ip_vs_sync.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Source Hashing scheduling module
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes:
*/
/*
* The sh algorithm is to select server by the hash key of source IP
* address. The pseudo code is as follows:
*
* n <- servernode[src_ip];
* if (n is dead) OR
* (n is overloaded) or (n.weight <= 0) then
* return NULL;
*
* return n;
*
 * Note that servernode is a 256-bucket hash table that maps the hash
* index derived from packet source IP address to the current server
* array. If the sh scheduler is used in cache cluster, it is good to
* combine it with cache_bypass feature. When the statically assigned
* server is dead or overloaded, the load balancer can bypass the cache
* server and send requests to the original server directly.
*
* The weight destination attribute can be used to control the
* distribution of connections to the destinations in servernode. The
* greater the weight, the more connections the destination
* will receive.
*
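 * Two optional per-service scheduler flags refine this behaviour (they
 * are normally set from user space, e.g. via ipvsadm's
 * --sched-flags sh-fallback,sh-port): IP_VS_SVC_F_SCHED_SH_FALLBACK
 * makes the scheduler walk to another bucket when the mapped server is
 * unavailable, and IP_VS_SVC_F_SCHED_SH_PORT mixes the source port into
 * the hash.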
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/ip_vs.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
/*
* IPVS SH bucket
*/
struct ip_vs_sh_bucket {
struct ip_vs_dest __rcu *dest; /* real server (cache) */
};
/*
* for IPVS SH entry hash table
*/
#ifndef CONFIG_IP_VS_SH_TAB_BITS
#define CONFIG_IP_VS_SH_TAB_BITS 8
#endif
#define IP_VS_SH_TAB_BITS CONFIG_IP_VS_SH_TAB_BITS
#define IP_VS_SH_TAB_SIZE (1 << IP_VS_SH_TAB_BITS)
#define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1)
struct ip_vs_sh_state {
struct rcu_head rcu_head;
struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
};
/* Helper function to determine if server is unavailable */
static inline bool is_unavailable(struct ip_vs_dest *dest)
{
return atomic_read(&dest->weight) <= 0 ||
dest->flags & IP_VS_DEST_F_OVERLOAD;
}
/*
* Returns hash value for IPVS SH entry
*/
static inline unsigned int
ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr,
__be16 port, unsigned int offset)
{
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return (offset + hash_32(ntohs(port) + ntohl(addr_fold),
IP_VS_SH_TAB_BITS)) &
IP_VS_SH_TAB_MASK;
}
/*
* Get ip_vs_dest associated with supplied parameters.
*/
static inline struct ip_vs_dest *
ip_vs_sh_get(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
const union nf_inet_addr *addr, __be16 port)
{
unsigned int hash = ip_vs_sh_hashkey(svc->af, addr, port, 0);
struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest);
return (!dest || is_unavailable(dest)) ? NULL : dest;
}
/* As ip_vs_sh_get, but with fallback if selected server is unavailable
*
* The fallback strategy loops around the table starting from a "random"
* point (in fact, it is chosen to be the original hash value to make the
* algorithm deterministic) to find a new server.
*/
static inline struct ip_vs_dest *
ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
const union nf_inet_addr *addr, __be16 port)
{
unsigned int offset, roffset;
unsigned int hash, ihash;
struct ip_vs_dest *dest;
/* first try the dest it's supposed to go to */
ihash = ip_vs_sh_hashkey(svc->af, addr, port, 0);
dest = rcu_dereference(s->buckets[ihash].dest);
if (!dest)
return NULL;
if (!is_unavailable(dest))
return dest;
IP_VS_DBG_BUF(6, "SH: selected unavailable server %s:%d, reselecting",
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));
/* if the original dest is unavailable, loop around the table
* starting from ihash to find a new dest
*/
for (offset = 0; offset < IP_VS_SH_TAB_SIZE; offset++) {
roffset = (offset + ihash) % IP_VS_SH_TAB_SIZE;
hash = ip_vs_sh_hashkey(svc->af, addr, port, roffset);
dest = rcu_dereference(s->buckets[hash].dest);
if (!dest)
break;
if (!is_unavailable(dest))
return dest;
IP_VS_DBG_BUF(6, "SH: selected unavailable "
"server %s:%d (offset %d), reselecting",
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port), roffset);
}
return NULL;
}
/*
* Assign all the hash buckets of the specified table with the service.
*/
static int
ip_vs_sh_reassign(struct ip_vs_sh_state *s, struct ip_vs_service *svc)
{
int i;
struct ip_vs_sh_bucket *b;
struct list_head *p;
struct ip_vs_dest *dest;
int d_count;
bool empty;
b = &s->buckets[0];
p = &svc->destinations;
empty = list_empty(p);
d_count = 0;
for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
dest = rcu_dereference_protected(b->dest, 1);
if (dest)
ip_vs_dest_put(dest);
if (empty)
RCU_INIT_POINTER(b->dest, NULL);
else {
if (p == &svc->destinations)
p = p->next;
dest = list_entry(p, struct ip_vs_dest, n_list);
ip_vs_dest_hold(dest);
RCU_INIT_POINTER(b->dest, dest);
IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n",
i, IP_VS_DBG_ADDR(dest->af, &dest->addr),
atomic_read(&dest->weight));
/* Don't move to next dest until filling weight */
if (++d_count >= atomic_read(&dest->weight)) {
p = p->next;
d_count = 0;
}
}
b++;
}
return 0;
}
/*
* Flush all the hash buckets of the specified table.
*/
static void ip_vs_sh_flush(struct ip_vs_sh_state *s)
{
int i;
struct ip_vs_sh_bucket *b;
struct ip_vs_dest *dest;
b = &s->buckets[0];
for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
dest = rcu_dereference_protected(b->dest, 1);
if (dest) {
ip_vs_dest_put(dest);
RCU_INIT_POINTER(b->dest, NULL);
}
b++;
}
}
static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
{
struct ip_vs_sh_state *s;
/* allocate the SH table for this service */
s = kzalloc(sizeof(struct ip_vs_sh_state), GFP_KERNEL);
if (s == NULL)
return -ENOMEM;
svc->sched_data = s;
IP_VS_DBG(6, "SH hash table (memory=%zdbytes) allocated for "
"current service\n",
sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);
/* assign the hash buckets with current dests */
ip_vs_sh_reassign(s, svc);
return 0;
}
static void ip_vs_sh_done_svc(struct ip_vs_service *svc)
{
struct ip_vs_sh_state *s = svc->sched_data;
/* got to clean up hash buckets here */
ip_vs_sh_flush(s);
/* release the table itself */
kfree_rcu(s, rcu_head);
IP_VS_DBG(6, "SH hash table (memory=%zdbytes) released\n",
sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);
}
static int ip_vs_sh_dest_changed(struct ip_vs_service *svc,
struct ip_vs_dest *dest)
{
struct ip_vs_sh_state *s = svc->sched_data;
/* assign the hash buckets with the updated service */
ip_vs_sh_reassign(s, svc);
return 0;
}
/* Helper function to get port number */
static inline __be16
ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
{
__be16 _ports[2], *ports;
/* At this point we know that we have a valid packet of some kind.
* Because ICMP packets are only guaranteed to have the first 8
* bytes, let's just grab the ports. Fortunately they're in the
* same position for all three of the protocols we care about.
*/
switch (iph->protocol) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_SCTP:
ports = skb_header_pointer(skb, iph->len, sizeof(_ports),
&_ports);
if (unlikely(!ports))
return 0;
if (likely(!ip_vs_iph_inverse(iph)))
return ports[0];
else
return ports[1];
default:
return 0;
}
}
/*
* Source Hashing scheduling
*/
static struct ip_vs_dest *
ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest;
struct ip_vs_sh_state *s;
__be16 port = 0;
const union nf_inet_addr *hash_addr;
hash_addr = ip_vs_iph_inverse(iph) ? &iph->daddr : &iph->saddr;
IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n");
if (svc->flags & IP_VS_SVC_F_SCHED_SH_PORT)
port = ip_vs_sh_get_port(skb, iph);
s = (struct ip_vs_sh_state *) svc->sched_data;
if (svc->flags & IP_VS_SVC_F_SCHED_SH_FALLBACK)
dest = ip_vs_sh_get_fallback(svc, s, hash_addr, port);
else
dest = ip_vs_sh_get(svc, s, hash_addr, port);
if (!dest) {
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
IP_VS_DBG_BUF(6, "SH: source IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, hash_addr),
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port));
return dest;
}
/*
* IPVS SH Scheduler structure
*/
static struct ip_vs_scheduler ip_vs_sh_scheduler =
{
.name = "sh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
.init_service = ip_vs_sh_init_svc,
.done_service = ip_vs_sh_done_svc,
.add_dest = ip_vs_sh_dest_changed,
.del_dest = ip_vs_sh_dest_changed,
.upd_dest = ip_vs_sh_dest_changed,
.schedule = ip_vs_sh_schedule,
};
static int __init ip_vs_sh_init(void)
{
return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
}
static void __exit ip_vs_sh_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_sh_scheduler);
synchronize_rcu();
}
module_init(ip_vs_sh_init);
module_exit(ip_vs_sh_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_sh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Least-Connection Scheduling module
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes:
* Wensong Zhang : added the ip_vs_lc_update_svc
* Wensong Zhang : added any dest with weight=0 is quiesced
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
/*
* Least Connection scheduling
*/
static struct ip_vs_dest *
ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *least = NULL;
unsigned int loh = 0, doh;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/*
* Simply select the server with the least number of
* (activeconns<<5) + inactconns
	 * except those whose weight is equal to zero.
* If the weight is equal to zero, it means that the server is
* quiesced, the existing connections to the server still get
* served, but no new connection is assigned to the server.
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
atomic_read(&dest->weight) == 0)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if (!least || doh < loh) {
least = dest;
loh = doh;
}
}
if (!least)
ip_vs_scheduler_err(svc, "no destination available");
else
IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
"inactconns %d\n",
IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
atomic_read(&least->inactconns));
return least;
}
static struct ip_vs_scheduler ip_vs_lc_scheduler = {
.name = "lc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
.schedule = ip_vs_lc_schedule,
};
static int __init ip_vs_lc_init(void)
{
return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
}
static void __exit ip_vs_lc_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_lc_scheduler);
synchronize_rcu();
}
module_init(ip_vs_lc_init);
module_exit(ip_vs_lc_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_lc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Locality-Based Least-Connection with Replication scheduler
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes:
* Julian Anastasov : Added the missing (dest->weight>0)
* condition in the ip_vs_dest_set_max.
*/
/*
* The lblc/r algorithm is as follows (pseudo code):
*
* if serverSet[dest_ip] is null then
* n, serverSet[dest_ip] <- {weighted least-conn node};
* else
* n <- {least-conn (alive) node in serverSet[dest_ip]};
* if (n is null) OR
* (n.conns>n.weight AND
* there is a node m with m.conns<m.weight/2) then
* n <- {weighted least-conn node};
* add n to serverSet[dest_ip];
* if |serverSet[dest_ip]| > 1 AND
* now - serverSet[dest_ip].lastMod > T then
* m <- {most conn node in serverSet[dest_ip]};
* remove m from serverSet[dest_ip];
* if serverSet[dest_ip] changed then
* serverSet[dest_ip].lastMod <- now;
*
* return n;
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/hash.h>
/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>
#include <net/ip_vs.h>
/*
* It is for garbage collection of stale IPVS lblcr entries,
* when the table is full.
*/
#define CHECK_EXPIRE_INTERVAL (60*HZ)
#define ENTRY_TIMEOUT (6*60*HZ)
#define DEFAULT_EXPIRATION (24*60*60*HZ)
/*
* It is for full expiration check.
* When there is no partial expiration check (garbage collection)
* in a half hour, do a full expiration check to collect stale
* entries that haven't been touched for a day.
*/
#define COUNT_FOR_FULL_EXPIRATION 30
/*
* for IPVS lblcr entry hash table
*/
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS 10
#endif
#define IP_VS_LBLCR_TAB_BITS CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK (IP_VS_LBLCR_TAB_SIZE - 1)
/*
* IPVS destination set structure and operations
*/
struct ip_vs_dest_set_elem {
struct list_head list; /* list link */
struct ip_vs_dest *dest; /* destination server */
struct rcu_head rcu_head;
};
struct ip_vs_dest_set {
atomic_t size; /* set size */
unsigned long lastmod; /* last modified time */
struct list_head list; /* destination list */
};
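/* Add a destination to the server set of an lblcr entry, optionally
 * checking first that it is not already a member.
 */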
static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
struct ip_vs_dest *dest, bool check)
{
struct ip_vs_dest_set_elem *e;
if (check) {
list_for_each_entry(e, &set->list, list) {
if (e->dest == dest)
return;
}
}
e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (e == NULL)
return;
ip_vs_dest_hold(dest);
e->dest = dest;
list_add_rcu(&e->list, &set->list);
atomic_inc(&set->size);
set->lastmod = jiffies;
}
static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
{
struct ip_vs_dest_set_elem *e;
e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
ip_vs_dest_put_and_free(e->dest);
kfree(e);
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
struct ip_vs_dest_set_elem *e;
list_for_each_entry(e, &set->list, list) {
if (e->dest == dest) {
/* HIT */
atomic_dec(&set->size);
set->lastmod = jiffies;
list_del_rcu(&e->list);
call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
break;
}
}
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
struct ip_vs_dest_set_elem *e, *ep;
list_for_each_entry_safe(e, ep, &set->list, list) {
list_del_rcu(&e->list);
call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
}
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
struct ip_vs_dest_set_elem *e;
struct ip_vs_dest *dest, *least;
int loh, doh;
/* select the first destination server, whose weight > 0 */
list_for_each_entry_rcu(e, &set->list, list) {
least = e->dest;
if (least->flags & IP_VS_DEST_F_OVERLOAD)
continue;
if ((atomic_read(&least->weight) > 0)
&& (least->flags & IP_VS_DEST_F_AVAILABLE)) {
loh = ip_vs_dest_conn_overhead(least);
goto nextstage;
}
}
return NULL;
/* find the destination with the weighted least load */
nextstage:
list_for_each_entry_continue_rcu(e, &set->list, list) {
dest = e->dest;
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if (((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight))
&& (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
least = dest;
loh = doh;
}
}
IP_VS_DBG_BUF(6, "%s(): server %s:%d "
"activeconns %d refcnt %d weight %d overhead %d\n",
__func__,
IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
refcount_read(&least->refcnt),
atomic_read(&least->weight), loh);
return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
struct ip_vs_dest_set_elem *e;
struct ip_vs_dest *dest, *most;
int moh, doh;
if (set == NULL)
return NULL;
/* select the first destination server, whose weight > 0 */
list_for_each_entry(e, &set->list, list) {
most = e->dest;
if (atomic_read(&most->weight) > 0) {
moh = ip_vs_dest_conn_overhead(most);
goto nextstage;
}
}
return NULL;
/* find the destination with the weighted most load */
nextstage:
list_for_each_entry_continue(e, &set->list, list) {
dest = e->dest;
doh = ip_vs_dest_conn_overhead(dest);
/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
if (((__s64)moh * atomic_read(&dest->weight) <
(__s64)doh * atomic_read(&most->weight))
&& (atomic_read(&dest->weight) > 0)) {
most = dest;
moh = doh;
}
}
IP_VS_DBG_BUF(6, "%s(): server %s:%d "
"activeconns %d refcnt %d weight %d overhead %d\n",
__func__,
IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
atomic_read(&most->activeconns),
refcount_read(&most->refcnt),
atomic_read(&most->weight), moh);
return most;
}
/*
* IPVS lblcr entry represents an association between destination
* IP address and its destination server set
*/
struct ip_vs_lblcr_entry {
struct hlist_node list;
int af; /* address family */
union nf_inet_addr addr; /* destination IP address */
struct ip_vs_dest_set set; /* destination server set */
unsigned long lastuse; /* last used time */
struct rcu_head rcu_head;
};
/*
* IPVS lblcr hash table
*/
struct ip_vs_lblcr_table {
struct rcu_head rcu_head;
struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */
atomic_t entries; /* number of entries */
int max_size; /* maximum size of entries */
struct timer_list periodic_timer; /* collect stale entries */
struct ip_vs_service *svc; /* pointer back to service */
int rover; /* rover for expire check */
int counter; /* counter for no expire */
bool dead;
};
#ifdef CONFIG_SYSCTL
/*
* IPVS LBLCR sysctl table
*/
static struct ctl_table vs_vars_table[] = {
{
.procname = "lblcr_expiration",
.data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ }
};
#endif
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
hlist_del_rcu(&en->list);
ip_vs_dest_set_eraseall(&en->set);
kfree_rcu(en, rcu_head);
}
/*
* Returns hash value for IPVS LBLCR entry
*/
static inline unsigned int
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return hash_32(ntohl(addr_fold), IP_VS_LBLCR_TAB_BITS);
}
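/*
 * For example (values illustrative): an IPv6 address is folded by
 * XOR-ing its four 32-bit words before hashing, so words 0x20010db8,
 * 0, 0 and 0x00000001 fold to 0x20010db8 ^ 0x00000001 = 0x20010db9,
 * which (after ntohl()) hash_32() then spreads over the
 * IP_VS_LBLCR_TAB_BITS-sized bucket array.
 */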
/*
 * Hash an entry into the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
atomic_inc(&tbl->entries);
}
/* Get ip_vs_lblcr_entry associated with supplied parameters. */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
const union nf_inet_addr *addr)
{
unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
struct ip_vs_lblcr_entry *en;
hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
if (ip_vs_addr_equal(af, &en->addr, addr))
return en;
return NULL;
}
/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a set of destination servers. Called under spin lock.
*/
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
u16 af, struct ip_vs_dest *dest)
{
struct ip_vs_lblcr_entry *en;
en = ip_vs_lblcr_get(af, tbl, daddr);
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en)
return NULL;
en->af = af;
ip_vs_addr_copy(af, &en->addr, daddr);
en->lastuse = jiffies;
/* initialize its dest set */
atomic_set(&(en->set.size), 0);
INIT_LIST_HEAD(&en->set.list);
ip_vs_dest_set_insert(&en->set, dest, false);
ip_vs_lblcr_hash(tbl, en);
return en;
}
ip_vs_dest_set_insert(&en->set, dest, true);
return en;
}
/*
* Flush all the entries of the specified table.
*/
static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
{
struct ip_vs_lblcr_table *tbl = svc->sched_data;
int i;
struct ip_vs_lblcr_entry *en;
struct hlist_node *next;
spin_lock_bh(&svc->sched_lock);
tbl->dead = true;
for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
ip_vs_lblcr_free(en);
}
}
spin_unlock_bh(&svc->sched_lock);
}
static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
return svc->ipvs->sysctl_lblcr_expiration;
#else
return DEFAULT_EXPIRATION;
#endif
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
struct ip_vs_lblcr_table *tbl = svc->sched_data;
unsigned long now = jiffies;
int i, j;
struct ip_vs_lblcr_entry *en;
struct hlist_node *next;
for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
if (time_after(en->lastuse +
sysctl_lblcr_expiration(svc), now))
continue;
ip_vs_lblcr_free(en);
atomic_dec(&tbl->entries);
}
spin_unlock(&svc->sched_lock);
}
tbl->rover = j;
}
/*
* Periodical timer handler for IPVS lblcr table
* It is used to collect stale entries when the number of entries
* exceeds the maximum size of the table.
*
 * Fixme: we probably need a more complicated algorithm to collect
 * entries that have not been used for a long time, even
 * if the number of entries does not exceed the maximum size
 * of the table.
 * The full expiration check serves this purpose for now.
*/
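/*
 * Roughly, with the usual defaults the timer fires about once a minute
 * and a full sweep runs every COUNT_FOR_FULL_EXPIRATION ticks; in
 * between, if e.g. entries=1000 and max_size=800, the partial pass
 * tries to free goal = (1000-800)*4/3 = 266 entries, capped at
 * max_size/2 = 400 (the numbers here are only illustrative).
 */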
static void ip_vs_lblcr_check_expire(struct timer_list *t)
{
struct ip_vs_lblcr_table *tbl = from_timer(tbl, t, periodic_timer);
struct ip_vs_service *svc = tbl->svc;
unsigned long now = jiffies;
int goal;
int i, j;
struct ip_vs_lblcr_entry *en;
struct hlist_node *next;
if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
/* do full expiration check */
ip_vs_lblcr_full_check(svc);
tbl->counter = 1;
goto out;
}
if (atomic_read(&tbl->entries) <= tbl->max_size) {
tbl->counter++;
goto out;
}
goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
if (goal > tbl->max_size/2)
goal = tbl->max_size/2;
for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
continue;
ip_vs_lblcr_free(en);
atomic_dec(&tbl->entries);
goal--;
}
spin_unlock(&svc->sched_lock);
if (goal <= 0)
break;
}
tbl->rover = j;
out:
mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
int i;
struct ip_vs_lblcr_table *tbl;
/*
* Allocate the ip_vs_lblcr_table for this service
*/
tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
svc->sched_data = tbl;
IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) allocated for "
"current service\n", sizeof(*tbl));
/*
* Initialize the hash buckets
*/
for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
INIT_HLIST_HEAD(&tbl->bucket[i]);
}
tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
tbl->rover = 0;
tbl->counter = 1;
tbl->dead = false;
tbl->svc = svc;
atomic_set(&tbl->entries, 0);
/*
* Hook periodic timer for garbage collection
*/
timer_setup(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 0);
mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
return 0;
}
static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
struct ip_vs_lblcr_table *tbl = svc->sched_data;
/* remove periodic timer */
timer_shutdown_sync(&tbl->periodic_timer);
/* got to clean up table entries here */
ip_vs_lblcr_flush(svc);
/* release the table itself */
kfree_rcu(tbl, rcu_head);
IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) released\n",
sizeof(*tbl));
}
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest, *least;
int loh, doh;
/*
* We use the following formula to estimate the load:
* (dest overhead) / dest->weight
*
* Remember -- no floats in kernel mode!!!
* The comparison of h1*w2 > h2*w1 is equivalent to that of
* h1/w1 > h2/w2
* if every weight is larger than zero.
*
* The server with weight=0 is quiesced and will not receive any
* new connection.
*/
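	/*
	 * For example (numbers purely illustrative): with overheads
	 * loh=50, doh=30 and weights w_least=2, w_dest=1, the code
	 * compares 50*1 > 30*2, i.e. 50 > 60, which is false, so the
	 * current "least" is kept; integer cross-multiplication gives
	 * the same answer as comparing 50/2 against 30/1 without any
	 * division. The same trick is used in ip_vs_dest_set_min/max
	 * above.
	 */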
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
if (atomic_read(&dest->weight) > 0) {
least = dest;
loh = ip_vs_dest_conn_overhead(least);
goto nextstage;
}
}
return NULL;
/*
* Find the destination with the least load.
*/
nextstage:
list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if ((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight)) {
least = dest;
loh = doh;
}
}
IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
"activeconns %d refcnt %d weight %d overhead %d\n",
IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
refcount_read(&least->refcnt),
atomic_read(&least->weight), loh);
return least;
}
/*
* If this destination server is overloaded and there is a less loaded
* server, then return true.
*/
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
struct ip_vs_dest *d;
list_for_each_entry_rcu(d, &svc->destinations, n_list) {
if (atomic_read(&d->activeconns)*2
< atomic_read(&d->weight)) {
return 1;
}
}
}
return 0;
}
/*
 * Locality-Based (weighted) Least-Connection with Replication scheduling
*/
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_lblcr_table *tbl = svc->sched_data;
struct ip_vs_dest *dest;
struct ip_vs_lblcr_entry *en;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/* First look in our cache */
en = ip_vs_lblcr_get(svc->af, tbl, &iph->daddr);
if (en) {
en->lastuse = jiffies;
/* Get the least loaded destination */
dest = ip_vs_dest_set_min(&en->set);
/* More than one destination + enough time passed by, cleanup */
if (atomic_read(&en->set.size) > 1 &&
time_after(jiffies, en->set.lastmod +
sysctl_lblcr_expiration(svc))) {
spin_lock_bh(&svc->sched_lock);
if (atomic_read(&en->set.size) > 1) {
struct ip_vs_dest *m;
m = ip_vs_dest_set_max(&en->set);
if (m)
ip_vs_dest_set_erase(&en->set, m);
}
spin_unlock_bh(&svc->sched_lock);
}
/* If the destination is not overloaded, use it */
if (dest && !is_overloaded(dest, svc))
goto out;
/* The cache entry is invalid, time to schedule */
dest = __ip_vs_lblcr_schedule(svc);
if (!dest) {
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
/* Update our cache entry */
spin_lock_bh(&svc->sched_lock);
if (!tbl->dead)
ip_vs_dest_set_insert(&en->set, dest, true);
spin_unlock_bh(&svc->sched_lock);
goto out;
}
/* No cache entry, time to schedule */
dest = __ip_vs_lblcr_schedule(svc);
if (!dest) {
IP_VS_DBG(1, "no destination available\n");
return NULL;
}
/* If we fail to create a cache entry, we'll just use the valid dest */
spin_lock_bh(&svc->sched_lock);
if (!tbl->dead)
ip_vs_lblcr_new(tbl, &iph->daddr, svc->af, dest);
spin_unlock_bh(&svc->sched_lock);
out:
IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, &iph->daddr),
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));
return dest;
}
/*
* IPVS LBLCR Scheduler structure
*/
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
.name = "lblcr",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
.init_service = ip_vs_lblcr_init_svc,
.done_service = ip_vs_lblcr_done_svc,
.schedule = ip_vs_lblcr_schedule,
};
/*
* per netns init.
*/
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
size_t vars_table_size = ARRAY_SIZE(vs_vars_table);
if (!ipvs)
return -ENOENT;
if (!net_eq(net, &init_net)) {
ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
sizeof(vs_vars_table),
GFP_KERNEL);
if (ipvs->lblcr_ctl_table == NULL)
return -ENOMEM;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
ipvs->lblcr_ctl_table[0].procname = NULL;
vars_table_size = 0;
}
} else
ipvs->lblcr_ctl_table = vs_vars_table;
ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
ipvs->lblcr_ctl_header = register_net_sysctl_sz(net, "net/ipv4/vs",
ipvs->lblcr_ctl_table,
vars_table_size);
if (!ipvs->lblcr_ctl_header) {
if (!net_eq(net, &init_net))
kfree(ipvs->lblcr_ctl_table);
return -ENOMEM;
}
return 0;
}
static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
if (!net_eq(net, &init_net))
kfree(ipvs->lblcr_ctl_table);
}
#else
static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }
#endif
static struct pernet_operations ip_vs_lblcr_ops = {
.init = __ip_vs_lblcr_init,
.exit = __ip_vs_lblcr_exit,
};
static int __init ip_vs_lblcr_init(void)
{
int ret;
ret = register_pernet_subsys(&ip_vs_lblcr_ops);
if (ret)
return ret;
ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
if (ret)
unregister_pernet_subsys(&ip_vs_lblcr_ops);
return ret;
}
static void __exit ip_vs_lblcr_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
unregister_pernet_subsys(&ip_vs_lblcr_ops);
rcu_barrier();
}
module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_lblcr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_proto_udp.c: UDP load balancing support for IPVS
*
* Authors: Wensong Zhang <[email protected]>
* Julian Anastasov <[email protected]>
*
* Changes: Hans Schillstrom <[email protected]>
* Network name space (netns) aware.
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/udp.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ip_vs.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
static int
udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
static int
udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
struct ip_vs_iphdr *iph)
{
struct ip_vs_service *svc;
struct udphdr _udph, *uh;
__be16 _ports[2], *ports = NULL;
if (likely(!ip_vs_iph_icmp(iph))) {
/* IPv6 fragments, only the first fragment will hit this */
uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
if (uh)
ports = &uh->source;
} else {
ports = skb_header_pointer(
skb, iph->len, sizeof(_ports), &_ports);
}
if (!ports) {
*verdict = NF_DROP;
return 0;
}
if (likely(!ip_vs_iph_inverse(iph)))
svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
&iph->daddr, ports[1]);
else
svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
&iph->saddr, ports[0]);
if (svc) {
int ignored;
if (ip_vs_todrop(ipvs)) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
*/
*verdict = NF_DROP;
return 0;
}
/*
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
if (!*cpp && ignored <= 0) {
if (!ignored)
*verdict = ip_vs_leave(svc, skb, pd, iph);
else
*verdict = NF_DROP;
return 0;
}
}
/* NF_ACCEPT */
return 1;
}
static inline void
udp_fast_csum_update(int af, struct udphdr *uhdr,
const union nf_inet_addr *oldip,
const union nf_inet_addr *newip,
__be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
uhdr->check =
csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
ip_vs_check_diff2(oldport, newport,
~csum_unfold(uhdr->check))));
else
#endif
uhdr->check =
csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
ip_vs_check_diff2(oldport, newport,
~csum_unfold(uhdr->check))));
if (!uhdr->check)
uhdr->check = CSUM_MANGLED_0;
}
static inline void
udp_partial_csum_update(int af, struct udphdr *uhdr,
const union nf_inet_addr *oldip,
const union nf_inet_addr *newip,
__be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
uhdr->check =
~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
ip_vs_check_diff2(oldlen, newlen,
csum_unfold(uhdr->check))));
else
#endif
uhdr->check =
~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
ip_vs_check_diff2(oldlen, newlen,
csum_unfold(uhdr->check))));
}
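/*
 * Both helpers above apply an incremental checksum update (in the
 * spirit of RFC 1624): fold the old checksum out, fold in the
 * difference of the changed address/port or length words, and refold,
 * so the full UDP payload never has to be re-summed for a simple
 * header rewrite.
 */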
INDIRECT_CALLABLE_SCOPE int
udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
struct udphdr *udph;
unsigned int udphoff = iph->len;
bool payload_csum = false;
int oldlen;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
return 1;
#endif
oldlen = skb->len - udphoff;
/* csum_check requires unshared skb */
if (skb_ensure_writable(skb, udphoff + sizeof(*udph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (!udp_csum_check(cp->af, skb, pp))
return 0;
/*
* Call application helper if needed
*/
if (!(ret = ip_vs_app_pkt_out(cp, skb, iph)))
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 1)
oldlen = skb->len - udphoff;
else
payload_csum = true;
}
udph = (void *)skb_network_header(skb) + udphoff;
udph->source = cp->vport;
/*
* Adjust UDP checksums
*/
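	/* Three cases below: the checksum will be finished later
	 * (CHECKSUM_PARTIAL, typically by hardware offload), so only the
	 * pseudo-header part is patched; only addr/port changed and a
	 * checksum is present, so do a fast incremental update; otherwise
	 * recompute the checksum over the whole datagram.
	 */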
if (skb->ip_summed == CHECKSUM_PARTIAL) {
udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
htons(oldlen),
htons(skb->len - udphoff));
} else if (!payload_csum && (udph->check != 0)) {
/* Only port and addr are changed, do fast csum update */
udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
cp->dport, cp->vport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
udph->check = 0;
skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
udph->check = csum_ipv6_magic(&cp->vaddr.in6,
&cp->caddr.in6,
skb->len - udphoff,
cp->protocol, skb->csum);
else
#endif
udph->check = csum_tcpudp_magic(cp->vaddr.ip,
cp->caddr.ip,
skb->len - udphoff,
cp->protocol,
skb->csum);
if (udph->check == 0)
udph->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
pp->name, udph->check,
(char*)&(udph->check) - (char*)udph);
}
return 1;
}
static int
udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
struct udphdr *udph;
unsigned int udphoff = iph->len;
bool payload_csum = false;
int oldlen;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
return 1;
#endif
oldlen = skb->len - udphoff;
/* csum_check requires unshared skb */
if (skb_ensure_writable(skb, udphoff + sizeof(*udph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (!udp_csum_check(cp->af, skb, pp))
return 0;
/*
* Attempt ip_vs_app call.
* It will fix ip_vs_conn
*/
if (!(ret = ip_vs_app_pkt_in(cp, skb, iph)))
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 1)
oldlen = skb->len - udphoff;
else
payload_csum = true;
}
udph = (void *)skb_network_header(skb) + udphoff;
udph->dest = cp->dport;
/*
* Adjust UDP checksums
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
udp_partial_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
htons(oldlen),
htons(skb->len - udphoff));
} else if (!payload_csum && (udph->check != 0)) {
/* Only port and addr are changed, do fast csum update */
udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
cp->vport, cp->dport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
udph->check = 0;
skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
udph->check = csum_ipv6_magic(&cp->caddr.in6,
&cp->daddr.in6,
skb->len - udphoff,
cp->protocol, skb->csum);
else
#endif
udph->check = csum_tcpudp_magic(cp->caddr.ip,
cp->daddr.ip,
skb->len - udphoff,
cp->protocol,
skb->csum);
if (udph->check == 0)
udph->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return 1;
}
static int
udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
struct udphdr _udph, *uh;
unsigned int udphoff;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
udphoff = sizeof(struct ipv6hdr);
else
#endif
udphoff = ip_hdrlen(skb);
uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
if (uh == NULL)
return 0;
if (uh->check != 0) {
switch (skb->ip_summed) {
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, udphoff,
skb->len - udphoff, 0);
fallthrough;
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - udphoff,
ipv6_hdr(skb)->nexthdr,
skb->csum)) {
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
} else
#endif
if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - udphoff,
ip_hdr(skb)->protocol,
skb->csum)) {
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
break;
default:
/* No need to checksum. */
break;
}
}
return 1;
}
static inline __u16 udp_app_hashkey(__be16 port)
{
return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
& UDP_APP_TAB_MASK;
}
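/*
 * For example, assuming the usual 4-bit application table: for a raw
 * (network byte order) port value p = 0x0015, the bucket is
 * ((0x0015 >> 4) ^ 0x0015) & 0xf = 0x0014 & 0xf = 4. The exact table
 * size depends on UDP_APP_TAB_BITS/UDP_APP_TAB_MASK.
 */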
static int udp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
hash = udp_app_hashkey(port);
list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
list_add_rcu(&inc->p_list, &ipvs->udp_apps[hash]);
atomic_inc(&pd->appcnt);
out:
return ret;
}
static void
udp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
atomic_dec(&pd->appcnt);
list_del_rcu(&inc->p_list);
}
static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
struct netns_ipvs *ipvs = cp->ipvs;
int hash;
struct ip_vs_app *inc;
int result = 0;
/* Default binding: bind app only for NAT */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
return 0;
/* Lookup application incarnations and bind the right one */
hash = udp_app_hashkey(cp->vport);
list_for_each_entry_rcu(inc, &ipvs->udp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
__func__,
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
ntohs(cp->vport),
inc->name, ntohs(inc->port));
cp->app = inc;
if (inc->init_conn)
result = inc->init_conn(inc, cp);
break;
}
}
return result;
}
static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_NORMAL] = 5*60*HZ,
[IP_VS_UDP_S_LAST] = 2*HZ,
};
static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_NORMAL] = "UDP",
[IP_VS_UDP_S_LAST] = "BUG!",
};
static const char *udp_state_name(int state)
{
if (state >= IP_VS_UDP_S_LAST)
return "ERR!";
return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
}
static void
udp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
struct ip_vs_proto_data *pd)
{
if (unlikely(!pd)) {
pr_err("UDP no ns data\n");
return;
}
cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
if (direction == IP_VS_DIR_OUTPUT)
ip_vs_control_assure_ct(cp);
}
static int __udp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
sizeof(udp_timeouts));
if (!pd->timeout_table)
return -ENOMEM;
return 0;
}
static void __udp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
kfree(pd->timeout_table);
}
struct ip_vs_protocol ip_vs_protocol_udp = {
.name = "UDP",
.protocol = IPPROTO_UDP,
.num_states = IP_VS_UDP_S_LAST,
.dont_defrag = 0,
.init = NULL,
.exit = NULL,
.init_netns = __udp_init,
.exit_netns = __udp_exit,
.conn_schedule = udp_conn_schedule,
.conn_in_get = ip_vs_conn_in_get_proto,
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = udp_snat_handler,
.dnat_handler = udp_dnat_handler,
.state_transition = udp_state_transition,
.state_name = udp_state_name,
.register_app = udp_register_app,
.unregister_app = udp_unregister_app,
.app_conn_bind = udp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL,
};
| linux-master | net/netfilter/ipvs/ip_vs_proto_udp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the Netfilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <[email protected]>
* Peter Kese <[email protected]>
* Julian Anastasov <[email protected]>
*
* The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
* with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
* and others.
*
* Changes:
* Paul `Rusty' Russell properly handle non-linear skbs
* Harald Welte don't use nfcache
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/icmp.h> /* for icmp_send */
#include <net/gue.h>
#include <net/gre.h>
#include <net/route.h>
#include <net/ip6_checksum.h>
#include <net/netns/generic.h> /* net_generic() */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
#include <linux/netfilter_ipv6.h>
#include <net/ip6_route.h>
#endif
#include <net/ip_vs.h>
#include <linux/indirect_call_wrapper.h>
EXPORT_SYMBOL(register_ip_vs_scheduler);
EXPORT_SYMBOL(unregister_ip_vs_scheduler);
EXPORT_SYMBOL(ip_vs_proto_name);
EXPORT_SYMBOL(ip_vs_conn_new);
EXPORT_SYMBOL(ip_vs_conn_in_get);
EXPORT_SYMBOL(ip_vs_conn_out_get);
#ifdef CONFIG_IP_VS_PROTO_TCP
EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
#endif
EXPORT_SYMBOL(ip_vs_conn_put);
#ifdef CONFIG_IP_VS_DEBUG
EXPORT_SYMBOL(ip_vs_get_debug_level);
#endif
EXPORT_SYMBOL(ip_vs_new_conn_out);
#if defined(CONFIG_IP_VS_PROTO_TCP) && defined(CONFIG_IP_VS_PROTO_UDP)
#define SNAT_CALL(f, ...) \
INDIRECT_CALL_2(f, tcp_snat_handler, udp_snat_handler, __VA_ARGS__)
#elif defined(CONFIG_IP_VS_PROTO_TCP)
#define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, tcp_snat_handler, __VA_ARGS__)
#elif defined(CONFIG_IP_VS_PROTO_UDP)
#define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, udp_snat_handler, __VA_ARGS__)
#else
#define SNAT_CALL(f, ...) f(__VA_ARGS__)
#endif
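/*
 * SNAT_CALL lets the compiler emit direct calls to tcp_snat_handler /
 * udp_snat_handler when those protocols are built in, which helps
 * avoid a retpoline-affected indirect call through pp->snat_handler
 * in the common case.
 */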
static unsigned int ip_vs_net_id __read_mostly;
/* netns cnt used for uniqueness */
static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
/* ID used in ICMP lookups */
#define icmp_id(icmph) (((icmph)->un).echo.id)
#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
const char *ip_vs_proto_name(unsigned int proto)
{
static char buf[20];
switch (proto) {
case IPPROTO_IP:
return "IP";
case IPPROTO_UDP:
return "UDP";
case IPPROTO_TCP:
return "TCP";
case IPPROTO_SCTP:
return "SCTP";
case IPPROTO_ICMP:
return "ICMP";
#ifdef CONFIG_IP_VS_IPV6
case IPPROTO_ICMPV6:
return "ICMPv6";
#endif
default:
sprintf(buf, "IP_%u", proto);
return buf;
}
}
void ip_vs_init_hash_table(struct list_head *table, int rows)
{
while (--rows >= 0)
INIT_LIST_HEAD(&table[rows]);
}
static inline void
ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
{
struct ip_vs_dest *dest = cp->dest;
struct netns_ipvs *ipvs = cp->ipvs;
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
struct ip_vs_cpu_stats *s;
struct ip_vs_service *svc;
local_bh_disable();
s = this_cpu_ptr(dest->stats.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.inpkts);
u64_stats_add(&s->cnt.inbytes, skb->len);
u64_stats_update_end(&s->syncp);
svc = rcu_dereference(dest->svc);
s = this_cpu_ptr(svc->stats.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.inpkts);
u64_stats_add(&s->cnt.inbytes, skb->len);
u64_stats_update_end(&s->syncp);
s = this_cpu_ptr(ipvs->tot_stats->s.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.inpkts);
u64_stats_add(&s->cnt.inbytes, skb->len);
u64_stats_update_end(&s->syncp);
local_bh_enable();
}
}
static inline void
ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
{
struct ip_vs_dest *dest = cp->dest;
struct netns_ipvs *ipvs = cp->ipvs;
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
struct ip_vs_cpu_stats *s;
struct ip_vs_service *svc;
local_bh_disable();
s = this_cpu_ptr(dest->stats.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.outpkts);
u64_stats_add(&s->cnt.outbytes, skb->len);
u64_stats_update_end(&s->syncp);
svc = rcu_dereference(dest->svc);
s = this_cpu_ptr(svc->stats.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.outpkts);
u64_stats_add(&s->cnt.outbytes, skb->len);
u64_stats_update_end(&s->syncp);
s = this_cpu_ptr(ipvs->tot_stats->s.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.outpkts);
u64_stats_add(&s->cnt.outbytes, skb->len);
u64_stats_update_end(&s->syncp);
local_bh_enable();
}
}
static inline void
ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
{
struct netns_ipvs *ipvs = svc->ipvs;
struct ip_vs_cpu_stats *s;
local_bh_disable();
s = this_cpu_ptr(cp->dest->stats.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.conns);
u64_stats_update_end(&s->syncp);
s = this_cpu_ptr(svc->stats.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.conns);
u64_stats_update_end(&s->syncp);
s = this_cpu_ptr(ipvs->tot_stats->s.cpustats);
u64_stats_update_begin(&s->syncp);
u64_stats_inc(&s->cnt.conns);
u64_stats_update_end(&s->syncp);
local_bh_enable();
}
static inline void
ip_vs_set_state(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
struct ip_vs_proto_data *pd)
{
if (likely(pd->pp->state_transition))
pd->pp->state_transition(cp, direction, skb, pd);
}
static inline int
ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
struct sk_buff *skb, int protocol,
const union nf_inet_addr *caddr, __be16 cport,
const union nf_inet_addr *vaddr, __be16 vport,
struct ip_vs_conn_param *p)
{
ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
vport, p);
p->pe = rcu_dereference(svc->pe);
if (p->pe && p->pe->fill_param)
return p->pe->fill_param(p, skb);
return 0;
}
/*
* IPVS persistent scheduling function
 * It creates a connection entry according to its template if one exists,
* or selects a server and creates a connection entry plus a template.
* Locking: we are svc user (svc->refcnt), so we hold all dests too
* Protocols supported: TCP, UDP
*/
static struct ip_vs_conn *
ip_vs_sched_persist(struct ip_vs_service *svc,
struct sk_buff *skb, __be16 src_port, __be16 dst_port,
int *ignored, struct ip_vs_iphdr *iph)
{
struct ip_vs_conn *cp = NULL;
struct ip_vs_dest *dest;
struct ip_vs_conn *ct;
__be16 dport = 0; /* destination port to forward */
unsigned int flags;
struct ip_vs_conn_param param;
const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
union nf_inet_addr snet; /* source network of the client,
after masking */
const union nf_inet_addr *src_addr, *dst_addr;
if (likely(!ip_vs_iph_inverse(iph))) {
src_addr = &iph->saddr;
dst_addr = &iph->daddr;
} else {
src_addr = &iph->daddr;
dst_addr = &iph->saddr;
}
/* Mask saddr with the netmask to adjust template granularity */
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
ipv6_addr_prefix(&snet.in6, &src_addr->in6,
(__force __u32) svc->netmask);
else
#endif
snet.ip = src_addr->ip & svc->netmask;
IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
"mnet %s\n",
IP_VS_DBG_ADDR(svc->af, src_addr), ntohs(src_port),
IP_VS_DBG_ADDR(svc->af, dst_addr), ntohs(dst_port),
IP_VS_DBG_ADDR(svc->af, &snet));
/*
 * As far as we know, FTP is a very complicated network protocol, and
 * it uses a control connection and data connections. For active FTP,
 * the FTP server initiates the data connection to the client, and its
 * source port is often 20. For passive FTP, the FTP server tells the
 * client the port that it passively listens on, and the client issues
 * the data connection. In the tunneling or direct routing mode, the
 * load balancer only sees the client-to-server half of the connection,
 * so the data port number is unknown to the load balancer. Thus, a
 * conn template like <caddr, 0, vaddr, 0, daddr, 0> is created for a
 * persistent FTP service, and a template like
 * <caddr, 0, vaddr, vport, daddr, dport> is created for other
 * persistent services.
*/
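	/* For example (addresses purely illustrative): with a persistent
	 * FTP VIP 10.0.0.1:21 and client 192.168.5.7, the template is
	 * <192.168.5.7 (masked), 0, 10.0.0.1, 0, <RS addr>, 0>, so that
	 * both the control connection (port 21) and any data connection
	 * (port 20 or a passive port) map to the same real server.
	 */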
{
int protocol = iph->protocol;
const union nf_inet_addr *vaddr = dst_addr;
__be16 vport = 0;
if (dst_port == svc->port) {
/* non-FTP template:
* <protocol, caddr, 0, vaddr, vport, daddr, dport>
* FTP template:
* <protocol, caddr, 0, vaddr, 0, daddr, 0>
*/
if (svc->port != FTPPORT)
vport = dst_port;
} else {
/* Note: persistent fwmark-based services and
* persistent port zero service are handled here.
* fwmark template:
* <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
* port zero template:
* <protocol,caddr,0,vaddr,0,daddr,0>
*/
if (svc->fwmark) {
protocol = IPPROTO_IP;
vaddr = &fwmark;
}
}
/* return *ignored = -1 so NF_DROP can be used */
if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
vaddr, vport, ¶m) < 0) {
*ignored = -1;
return NULL;
}
}
/* Check if a template already exists */
ct = ip_vs_ct_in_get(¶m);
if (!ct || !ip_vs_check_template(ct, NULL)) {
struct ip_vs_scheduler *sched;
/*
* No template found or the dest of the connection
* template is not available.
* return *ignored=0 i.e. ICMP and NF_DROP
*/
sched = rcu_dereference(svc->scheduler);
if (sched) {
/* read svc->sched_data after svc->scheduler */
smp_rmb();
dest = sched->schedule(svc, skb, iph);
} else {
dest = NULL;
}
if (!dest) {
IP_VS_DBG(1, "p-schedule: no dest found.\n");
kfree(param.pe_data);
*ignored = 0;
return NULL;
}
if (dst_port == svc->port && svc->port != FTPPORT)
dport = dest->port;
/* Create a template
* This adds param.pe_data to the template,
* and thus param.pe_data will be destroyed
* when the template expires */
ct = ip_vs_conn_new(¶m, dest->af, &dest->addr, dport,
IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
if (ct == NULL) {
kfree(param.pe_data);
*ignored = -1;
return NULL;
}
ct->timeout = svc->timeout;
} else {
/* set destination with the found template */
dest = ct->dest;
kfree(param.pe_data);
}
dport = dst_port;
if (dport == svc->port && dest->port)
dport = dest->port;
flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
&& iph->protocol == IPPROTO_UDP) ?
IP_VS_CONN_F_ONE_PACKET : 0;
/*
* Create a new connection according to the template
*/
ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, src_addr,
src_port, dst_addr, dst_port, ¶m);
cp = ip_vs_conn_new(¶m, dest->af, &dest->addr, dport, flags, dest,
skb->mark);
if (cp == NULL) {
ip_vs_conn_put(ct);
*ignored = -1;
return NULL;
}
/*
* Add its control
*/
ip_vs_control_add(cp, ct);
ip_vs_conn_put(ct);
ip_vs_conn_stats(cp, svc);
return cp;
}
/*
* IPVS main scheduling function
* It selects a server according to the virtual service, and
* creates a connection entry.
* Protocols supported: TCP, UDP
*
* Usage of *ignored
*
* 1 : protocol tried to schedule (eg. on SYN), found svc but the
* svc/scheduler decides that this packet should be accepted with
* NF_ACCEPT because it must not be scheduled.
*
* 0 : scheduler can not find destination, so try bypass or
* return ICMP and then NF_DROP (ip_vs_leave).
*
* -1 : scheduler tried to schedule but fatal error occurred, eg.
* ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
* failure such as missing Call-ID, ENOMEM on skb_linearize
* or pe_data. In this case we should return NF_DROP without
* any attempts to send ICMP with ip_vs_leave.
*/
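/*
 * In short, callers such as the tcp/udp conn_schedule handlers treat
 * *ignored == 1 as "accept without scheduling", *ignored == 0 as
 * "let ip_vs_leave decide (ICMP and/or NF_DROP)", and *ignored < 0 as
 * an unconditional NF_DROP.
 */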
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
struct ip_vs_proto_data *pd, int *ignored,
struct ip_vs_iphdr *iph)
{
struct ip_vs_protocol *pp = pd->pp;
struct ip_vs_conn *cp = NULL;
struct ip_vs_scheduler *sched;
struct ip_vs_dest *dest;
__be16 _ports[2], *pptr, cport, vport;
const void *caddr, *vaddr;
unsigned int flags;
*ignored = 1;
/*
* IPv6 frags, only the first hit here.
*/
pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
if (pptr == NULL)
return NULL;
if (likely(!ip_vs_iph_inverse(iph))) {
cport = pptr[0];
caddr = &iph->saddr;
vport = pptr[1];
vaddr = &iph->daddr;
} else {
cport = pptr[1];
caddr = &iph->daddr;
vport = pptr[0];
vaddr = &iph->saddr;
}
/*
 * FTPDATA needs this check when using a local real server.
 * Never schedule Active FTPDATA connections from the real server.
 * For LVS-NAT they must already be created. For other methods
 * with persistence the connection is created on SYN+ACK.
*/
if (cport == FTPDATA) {
IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
"Not scheduling FTPDATA");
return NULL;
}
/*
* Do not schedule replies from local real server.
*/
if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
iph->hdr_flags ^= IP_VS_HDR_INVERSE;
cp = INDIRECT_CALL_1(pp->conn_in_get,
ip_vs_conn_in_get_proto, svc->ipvs,
svc->af, skb, iph);
iph->hdr_flags ^= IP_VS_HDR_INVERSE;
if (cp) {
IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
"Not scheduling reply for existing"
" connection");
__ip_vs_conn_put(cp);
return NULL;
}
}
/*
* Persistent service
*/
if (svc->flags & IP_VS_SVC_F_PERSISTENT)
return ip_vs_sched_persist(svc, skb, cport, vport, ignored,
iph);
*ignored = 0;
/*
* Non-persistent service
*/
if (!svc->fwmark && vport != svc->port) {
if (!svc->port)
pr_err("Schedule: port zero only supported "
"in persistent services, "
"check your ipvs configuration\n");
return NULL;
}
sched = rcu_dereference(svc->scheduler);
if (sched) {
/* read svc->sched_data after svc->scheduler */
smp_rmb();
dest = sched->schedule(svc, skb, iph);
} else {
dest = NULL;
}
if (dest == NULL) {
IP_VS_DBG(1, "Schedule: no dest found.\n");
return NULL;
}
flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
&& iph->protocol == IPPROTO_UDP) ?
IP_VS_CONN_F_ONE_PACKET : 0;
/*
* Create a connection entry.
*/
{
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
caddr, cport, vaddr, vport, &p);
cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
dest->port ? dest->port : vport,
flags, dest, skb->mark);
if (!cp) {
*ignored = -1;
return NULL;
}
}
IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
"d:%s:%u conn->flags:%X conn->refcnt:%d\n",
ip_vs_fwd_tag(cp),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
cp->flags, refcount_read(&cp->refcnt));
ip_vs_conn_stats(cp, svc);
return cp;
}
static inline int ip_vs_addr_is_unicast(struct net *net, int af,
union nf_inet_addr *addr)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
return ipv6_addr_type(&addr->in6) & IPV6_ADDR_UNICAST;
#endif
return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
}
/*
* Pass or drop the packet.
* Called by ip_vs_in, when the virtual service is available but
* no destination is available for a new connection.
*/
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
{
__be16 _ports[2], *pptr, dport;
struct netns_ipvs *ipvs = svc->ipvs;
struct net *net = ipvs->net;
pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
if (!pptr)
return NF_DROP;
dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
	/* If it is a fwmark-based service, the cache_bypass sysctl is
	   enabled and the destination is a non-local unicast address,
	   then create a cache_bypass connection entry */
if (sysctl_cache_bypass(ipvs) && svc->fwmark &&
!(iph->hdr_flags & (IP_VS_HDR_INVERSE | IP_VS_HDR_ICMP)) &&
ip_vs_addr_is_unicast(net, svc->af, &iph->daddr)) {
int ret;
struct ip_vs_conn *cp;
unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
iph->protocol == IPPROTO_UDP) ?
IP_VS_CONN_F_ONE_PACKET : 0;
union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
/* create a new connection entry */
IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
{
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
&iph->saddr, pptr[0],
&iph->daddr, pptr[1], &p);
cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
IP_VS_CONN_F_BYPASS | flags,
NULL, skb->mark);
if (!cp)
return NF_DROP;
}
/* statistics */
ip_vs_in_stats(cp, skb);
/* set state */
ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
/* transmit the first SYN packet */
ret = cp->packet_xmit(skb, cp, pd->pp, iph);
/* do not touch skb anymore */
if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
atomic_inc(&cp->control->in_pkts);
else
atomic_inc(&cp->in_pkts);
ip_vs_conn_put(cp);
return ret;
}
/*
 * When a virtual FTP service is present, packets destined
 * for other services on the VIP may get here (except services
 * listed in the ipvs table); pass such packets along, because
 * it is not the job of IPVS to decide to drop them.
*/
if (svc->port == FTPPORT && dport != FTPPORT)
return NF_ACCEPT;
if (unlikely(ip_vs_iph_icmp(iph)))
return NF_DROP;
/*
* Notify the client that the destination is unreachable, and
* release the socket buffer.
 * Since we are at the IP layer and the TCP socket is not actually
 * created, a TCP RST packet cannot be sent; instead,
 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
*/
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6) {
if (!skb->dev)
skb->dev = net->loopback_dev;
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
} else
#endif
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
return NF_DROP;
}
#ifdef CONFIG_SYSCTL
static int sysctl_snat_reroute(struct netns_ipvs *ipvs)
{
return ipvs->sysctl_snat_reroute;
}
static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs)
{
return ipvs->sysctl_nat_icmp_send;
}
#else
static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; }
static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; }
#endif
__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
{
return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
}
static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
{
if (NF_INET_LOCAL_IN == hooknum)
return IP_DEFRAG_VS_IN;
if (NF_INET_FORWARD == hooknum)
return IP_DEFRAG_VS_FWD;
return IP_DEFRAG_VS_OUT;
}
static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs,
struct sk_buff *skb, u_int32_t user)
{
int err;
local_bh_disable();
err = ip_defrag(ipvs->net, skb, user);
local_bh_enable();
if (!err)
ip_send_check(ip_hdr(skb));
return err;
}
static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
struct sk_buff *skb, unsigned int hooknum)
{
if (!sysctl_snat_reroute(ipvs))
return 0;
/* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
if (NF_INET_LOCAL_IN == hooknum)
return 0;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
struct dst_entry *dst = skb_dst(skb);
if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0)
return 1;
} else
#endif
if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0)
return 1;
return 0;
}
/*
* Packet has been made sufficiently writable in caller
* - inout: 1=in->out, 0=out->in
*/
void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, int inout)
{
struct iphdr *iph = ip_hdr(skb);
unsigned int icmp_offset = iph->ihl*4;
struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
icmp_offset);
struct iphdr *ciph = (struct iphdr *)(icmph + 1);
if (inout) {
iph->saddr = cp->vaddr.ip;
ip_send_check(iph);
ciph->daddr = cp->vaddr.ip;
ip_send_check(ciph);
} else {
iph->daddr = cp->daddr.ip;
ip_send_check(iph);
ciph->saddr = cp->daddr.ip;
ip_send_check(ciph);
}
/* the TCP/UDP/SCTP port */
if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
IPPROTO_SCTP == ciph->protocol) {
__be16 *ports = (void *)ciph + ciph->ihl*4;
if (inout)
ports[1] = cp->vport;
else
ports[0] = cp->dport;
}
/* And finally the ICMP checksum */
icmph->checksum = 0;
icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (inout)
IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
"Forwarding altered outgoing ICMP");
else
IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
"Forwarding altered incoming ICMP");
}
#ifdef CONFIG_IP_VS_IPV6
void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, int inout)
{
struct ipv6hdr *iph = ipv6_hdr(skb);
unsigned int icmp_offset = 0;
unsigned int offs = 0; /* header offset*/
int protocol;
struct icmp6hdr *icmph;
struct ipv6hdr *ciph;
unsigned short fragoffs;
ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
offs = icmp_offset + sizeof(struct icmp6hdr);
ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
if (inout) {
iph->saddr = cp->vaddr.in6;
ciph->daddr = cp->vaddr.in6;
} else {
iph->daddr = cp->daddr.in6;
ciph->saddr = cp->daddr.in6;
}
/* the TCP/UDP/SCTP port */
if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
IPPROTO_SCTP == protocol)) {
__be16 *ports = (void *)(skb_network_header(skb) + offs);
IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
ntohs(inout ? ports[1] : ports[0]),
ntohs(inout ? cp->vport : cp->dport));
if (inout)
ports[1] = cp->vport;
else
ports[0] = cp->dport;
}
/* And finally the ICMP checksum */
icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
skb->len - icmp_offset,
IPPROTO_ICMPV6, 0);
skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
skb->ip_summed = CHECKSUM_PARTIAL;
if (inout)
IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
(void *)ciph - (void *)iph,
"Forwarding altered outgoing ICMPv6");
else
IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
(void *)ciph - (void *)iph,
"Forwarding altered incoming ICMPv6");
}
#endif
/* Handle relevant response ICMP messages - forward to the right
* destination host.
*/
static int handle_response_icmp(int af, struct sk_buff *skb,
union nf_inet_addr *snet,
__u8 protocol, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp,
unsigned int offset, unsigned int ihl,
unsigned int hooknum)
{
unsigned int verdict = NF_DROP;
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
goto after_nat;
/* Ensure the checksum is correct */
if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
/* Failed checksum! */
IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
IP_VS_DBG_ADDR(af, snet));
goto out;
}
if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
IPPROTO_SCTP == protocol)
offset += 2 * sizeof(__u16);
if (skb_ensure_writable(skb, offset))
goto out;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ip_vs_nat_icmp_v6(skb, pp, cp, 1);
else
#endif
ip_vs_nat_icmp(skb, pp, cp, 1);
if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
goto out;
after_nat:
/* do the statistics and put it back */
ip_vs_out_stats(cp, skb);
skb->ipvs_property = 1;
if (!(cp->flags & IP_VS_CONN_F_NFCT))
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 0);
verdict = NF_ACCEPT;
out:
__ip_vs_conn_put(cp);
return verdict;
}
/*
* Handle ICMP messages in the inside-to-outside direction (outgoing).
* Find any that might be relevant, check against existing connections.
* Currently handles error types - unreachable, quench, ttl exceeded.
*/
static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
int *related, unsigned int hooknum)
{
struct iphdr *iph;
struct icmphdr _icmph, *ic;
struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
struct ip_vs_iphdr ciph;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
unsigned int offset, ihl;
union nf_inet_addr snet;
*related = 1;
/* reassemble IP fragments */
if (ip_is_fragment(ip_hdr(skb))) {
if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
return NF_STOLEN;
}
iph = ip_hdr(skb);
offset = ihl = iph->ihl * 4;
ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
if (ic == NULL)
return NF_DROP;
IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
ic->type, ntohs(icmp_id(ic)),
&iph->saddr, &iph->daddr);
/*
* Work through seeing if this is for us.
* These checks are supposed to be in an order that means easy
 * things are checked first to speed up processing; however,
* this means that some packets will manage to get a long way
* down this stack and then be rejected, but that's life.
*/
if ((ic->type != ICMP_DEST_UNREACH) &&
(ic->type != ICMP_SOURCE_QUENCH) &&
(ic->type != ICMP_TIME_EXCEEDED)) {
*related = 0;
return NF_ACCEPT;
}
/* Now find the contained IP header */
offset += sizeof(_icmph);
cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
pp = ip_vs_proto_get(cih->protocol);
if (!pp)
return NF_ACCEPT;
/* Is the embedded protocol header present? */
if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
pp->dont_defrag))
return NF_ACCEPT;
IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
"Checking outgoing ICMP for");
ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
/* The embedded headers contain source and dest in reverse order */
cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
ipvs, AF_INET, skb, &ciph);
if (!cp)
return NF_ACCEPT;
snet.ip = iph->saddr;
return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
pp, ciph.len, ihl, hooknum);
}
#ifdef CONFIG_IP_VS_IPV6
static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
int *related, unsigned int hooknum,
struct ip_vs_iphdr *ipvsh)
{
struct icmp6hdr _icmph, *ic;
struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
union nf_inet_addr snet;
unsigned int offset;
*related = 1;
ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph);
if (ic == NULL)
return NF_DROP;
/*
* Work through seeing if this is for us.
* These checks are supposed to be in an order that means easy
 * things are checked first to speed up processing; however,
* this means that some packets will manage to get a long way
* down this stack and then be rejected, but that's life.
*/
if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
*related = 0;
return NF_ACCEPT;
}
	/* A fragment header before the ICMP header tells us that
	 * this is not an error message, since error messages cannot be fragmented.
*/
if (ipvsh->flags & IP6_FH_F_FRAG)
return NF_DROP;
IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
ic->icmp6_type, ntohs(icmpv6_id(ic)),
&ipvsh->saddr, &ipvsh->daddr);
if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, ipvsh->len + sizeof(_icmph),
true, &ciph))
return NF_ACCEPT; /* The packet looks wrong, ignore */
pp = ip_vs_proto_get(ciph.protocol);
if (!pp)
return NF_ACCEPT;
/* The embedded headers contain source and dest in reverse order */
cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
ipvs, AF_INET6, skb, &ciph);
if (!cp)
return NF_ACCEPT;
snet.in6 = ciph.saddr.in6;
offset = ciph.len;
return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
pp, offset, sizeof(struct ipv6hdr),
hooknum);
}
#endif
/*
 * Check if the SCTP chunk is an ABORT chunk
*/
static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
{
struct sctp_chunkhdr *sch, schunk;
sch = skb_header_pointer(skb, nh_len + sizeof(struct sctphdr),
sizeof(schunk), &schunk);
if (sch == NULL)
return 0;
if (sch->type == SCTP_CID_ABORT)
return 1;
return 0;
}
static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
{
struct tcphdr _tcph, *th;
th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
if (th == NULL)
return 0;
return th->rst;
}
static inline bool is_new_conn(const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
switch (iph->protocol) {
case IPPROTO_TCP: {
struct tcphdr _tcph, *th;
th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
if (th == NULL)
return false;
return th->syn;
}
case IPPROTO_SCTP: {
struct sctp_chunkhdr *sch, schunk;
sch = skb_header_pointer(skb, iph->len + sizeof(struct sctphdr),
sizeof(schunk), &schunk);
if (sch == NULL)
return false;
return sch->type == SCTP_CID_INIT;
}
default:
return false;
}
}
static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
int conn_reuse_mode)
{
/* Controlled (FTP DATA or persistence)? */
if (cp->control)
return false;
switch (cp->protocol) {
case IPPROTO_TCP:
return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
(cp->state == IP_VS_TCP_S_CLOSE) ||
((conn_reuse_mode & 2) &&
(cp->state == IP_VS_TCP_S_FIN_WAIT) &&
(cp->flags & IP_VS_CONN_F_NOOUTPUT));
case IPPROTO_SCTP:
return cp->state == IP_VS_SCTP_S_CLOSED;
default:
return false;
}
}
/* Generic function to create new connections for outgoing RS packets
*
* Pre-requisites for successful connection creation:
 * 1) The Virtual Service is NOT fwmark based:
 *    in a fwmark VS the actual vaddr and vport are unknown to IPVS
 * 2) Neither the Real Server nor the Virtual Service was configured
 *    without a port: this allows different VSs to match the same RS address
*/
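/*
 * For example (addresses illustrative): a VS 10.0.0.1:53/UDP with RS
 * 192.168.1.10:53 satisfies both pre-requisites, while a fwmark-based
 * VS or an RS/VS configured with port 0 does not, and no outgoing
 * connection will be created for it here.
 */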
struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
struct ip_vs_dest *dest,
struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
__be16 dport,
__be16 cport)
{
struct ip_vs_conn_param param;
struct ip_vs_conn *ct = NULL, *cp = NULL;
const union nf_inet_addr *vaddr, *daddr, *caddr;
union nf_inet_addr snet;
__be16 vport;
unsigned int flags;
vaddr = &svc->addr;
vport = svc->port;
daddr = &iph->saddr;
caddr = &iph->daddr;
/* check pre-requisites are satisfied */
if (svc->fwmark)
return NULL;
if (!vport || !dport)
return NULL;
/* for persistent service first create connection template */
if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
/* apply netmask the same way ingress-side does */
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
ipv6_addr_prefix(&snet.in6, &caddr->in6,
(__force __u32)svc->netmask);
else
#endif
snet.ip = caddr->ip & svc->netmask;
/* fill params and create template if not existent */
if (ip_vs_conn_fill_param_persist(svc, skb, iph->protocol,
&snet, 0, vaddr,
vport, ¶m) < 0)
return NULL;
ct = ip_vs_ct_in_get(¶m);
/* check if template exists and points to the same dest */
if (!ct || !ip_vs_check_template(ct, dest)) {
ct = ip_vs_conn_new(¶m, dest->af, daddr, dport,
IP_VS_CONN_F_TEMPLATE, dest, 0);
if (!ct) {
kfree(param.pe_data);
return NULL;
}
ct->timeout = svc->timeout;
} else {
kfree(param.pe_data);
}
}
/* connection flags */
flags = ((svc->flags & IP_VS_SVC_F_ONEPACKET) &&
iph->protocol == IPPROTO_UDP) ? IP_VS_CONN_F_ONE_PACKET : 0;
/* create connection */
ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
caddr, cport, vaddr, vport, ¶m);
cp = ip_vs_conn_new(¶m, dest->af, daddr, dport, flags, dest, 0);
if (!cp) {
if (ct)
ip_vs_conn_put(ct);
return NULL;
}
if (ct) {
ip_vs_control_add(cp, ct);
ip_vs_conn_put(ct);
}
ip_vs_conn_stats(cp, svc);
/* return connection (will be used to handle outgoing packet) */
IP_VS_DBG_BUF(6, "New connection RS-initiated:%c c:%s:%u v:%s:%u "
"d:%s:%u conn->flags:%X conn->refcnt:%d\n",
ip_vs_fwd_tag(cp),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
cp->flags, refcount_read(&cp->refcnt));
return cp;
}
/* Handle outgoing packets which are considered requests initiated by
 * real servers, so that subsequent responses from the external client can be
* routed to the right real server.
* Used also for outgoing responses in OPS mode.
*
* Connection management is handled by persistent-engine specific callback.
*/
static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
struct netns_ipvs *ipvs,
int af, struct sk_buff *skb,
const struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest;
struct ip_vs_conn *cp = NULL;
__be16 _ports[2], *pptr;
if (hooknum == NF_INET_LOCAL_IN)
return NULL;
pptr = frag_safe_skb_hp(skb, iph->len,
sizeof(_ports), _ports);
if (!pptr)
return NULL;
dest = ip_vs_find_real_service(ipvs, af, iph->protocol,
&iph->saddr, pptr[0]);
if (dest) {
struct ip_vs_service *svc;
struct ip_vs_pe *pe;
svc = rcu_dereference(dest->svc);
if (svc) {
pe = rcu_dereference(svc->pe);
if (pe && pe->conn_out)
cp = pe->conn_out(svc, dest, skb, iph,
pptr[0], pptr[1]);
}
}
return cp;
}
/* Handle response packets: rewrite addresses and send away...
*/
static unsigned int
handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
unsigned int hooknum)
{
struct ip_vs_protocol *pp = pd->pp;
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
goto after_nat;
IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
if (skb_ensure_writable(skb, iph->len))
goto drop;
/* mangle the packet */
if (pp->snat_handler &&
!SNAT_CALL(pp->snat_handler, skb, pp, cp, iph))
goto drop;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ipv6_hdr(skb)->saddr = cp->vaddr.in6;
else
#endif
{
ip_hdr(skb)->saddr = cp->vaddr.ip;
ip_send_check(ip_hdr(skb));
}
/*
 * nf_iterate does not expect a change in skb->dst->dev.
 * It looks like it is not fatal to enable this code for hooks
 * where our handlers are at the end of the chain list and
 * when all subsequent handlers use skb->dst->dev and not outdev.
 * It will definitely route the in/out NAT traffic properly
 * when multiple paths are used.
*/
	/* For policy routing, packets originating from this
	 * machine itself may be routed differently from packets
	 * passing through. We want this packet to be routed as
* if it came from this machine itself. So re-compute
* the routing information.
*/
if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
goto drop;
IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
after_nat:
ip_vs_out_stats(cp, skb);
ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
skb->ipvs_property = 1;
if (!(cp->flags & IP_VS_CONN_F_NFCT))
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 0);
ip_vs_conn_put(cp);
return NF_ACCEPT;
drop:
ip_vs_conn_put(cp);
kfree_skb(skb);
return NF_STOLEN;
}
/*
* Check if outgoing packet belongs to the established ip_vs_conn.
*/
static unsigned int
ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
{
struct netns_ipvs *ipvs = net_ipvs(state->net);
unsigned int hooknum = state->hook;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int af = state->pf;
struct sock *sk;
/* Already marked as IPVS request or reply? */
if (skb->ipvs_property)
return NF_ACCEPT;
sk = skb_to_full_sk(skb);
/* Bad... Do not break raw sockets */
if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
af == AF_INET)) {
if (sk->sk_family == PF_INET && inet_test_bit(NODEFRAG, sk))
return NF_ACCEPT;
}
if (unlikely(!skb_dst(skb)))
return NF_ACCEPT;
if (!ipvs->enable)
return NF_ACCEPT;
ip_vs_fill_iph_skb(af, skb, false, &iph);
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
int related;
int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
hooknum, &iph);
if (related)
return verdict;
}
} else
#endif
if (unlikely(iph.protocol == IPPROTO_ICMP)) {
int related;
int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
if (related)
return verdict;
}
pd = ip_vs_proto_data_get(ipvs, iph.protocol);
if (unlikely(!pd))
return NF_ACCEPT;
pp = pd->pp;
/* reassemble IP fragments */
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET)
#endif
if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
if (ip_vs_gather_frags(ipvs, skb,
ip_vs_defrag_user(hooknum)))
return NF_STOLEN;
ip_vs_fill_iph_skb(AF_INET, skb, false, &iph);
}
/*
* Check if the packet belongs to an existing entry
*/
cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
ipvs, af, skb, &iph);
if (likely(cp))
return handle_response(af, skb, pd, cp, &iph, hooknum);
/* Check for real-server-started requests */
if (atomic_read(&ipvs->conn_out_counter)) {
/* Currently only for UDP:
* connection-oriented protocols typically use
* ephemeral ports for outgoing connections, so
* related incoming responses would not match any VS
*/
if (pp->protocol == IPPROTO_UDP) {
cp = __ip_vs_rs_conn_out(hooknum, ipvs, af, skb, &iph);
if (likely(cp))
return handle_response(af, skb, pd, cp, &iph,
hooknum);
}
}
if (sysctl_nat_icmp_send(ipvs) &&
(pp->protocol == IPPROTO_TCP ||
pp->protocol == IPPROTO_UDP ||
pp->protocol == IPPROTO_SCTP)) {
__be16 _ports[2], *pptr;
pptr = frag_safe_skb_hp(skb, iph.len,
sizeof(_ports), _ports);
if (pptr == NULL)
return NF_ACCEPT; /* Not for me */
if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
pptr[0])) {
/*
* Notify the real server that there is no
* existing entry for this packet, unless it
* is a TCP RST or an SCTP ABORT.
*/
if ((iph.protocol != IPPROTO_TCP &&
iph.protocol != IPPROTO_SCTP)
|| ((iph.protocol == IPPROTO_TCP
&& !is_tcp_reset(skb, iph.len))
|| (iph.protocol == IPPROTO_SCTP
&& !is_sctp_abort(skb,
iph.len)))) {
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
if (!skb->dev)
skb->dev = ipvs->net->loopback_dev;
icmpv6_send(skb,
ICMPV6_DEST_UNREACH,
ICMPV6_PORT_UNREACH,
0);
} else
#endif
icmp_send(skb,
ICMP_DEST_UNREACH,
ICMP_PORT_UNREACH, 0);
return NF_DROP;
}
}
}
IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
"ip_vs_out: packet continues traversal as normal");
return NF_ACCEPT;
}
static unsigned int
ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
struct ip_vs_iphdr *iph)
{
struct ip_vs_protocol *pp = pd->pp;
if (!iph->fragoffs) {
/* No (second) fragments need to enter here, as the fragment
* zero replayed by nf_defrag_ipv6 will already have created the cp
*/
/* Schedule and create new connection entry into cpp */
if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
return 0;
}
if (unlikely(!*cpp)) {
/* sorry, all this trouble for a no-hit :) */
IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
"ip_vs_in: packet continues traversal as normal");
/* Fragment couldn't be mapped to a conn entry */
if (iph->fragoffs)
IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
"unhandled fragment");
*verdict = NF_ACCEPT;
return 0;
}
return 1;
}
/* Check the UDP tunnel and return its header length */
static int ipvs_udp_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
unsigned int offset, __u16 af,
const union nf_inet_addr *daddr, __u8 *proto)
{
struct udphdr _udph, *udph;
struct ip_vs_dest *dest;
udph = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (!udph)
goto unk;
offset += sizeof(struct udphdr);
dest = ip_vs_find_tunnel(ipvs, af, daddr, udph->dest);
if (!dest)
goto unk;
if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
struct guehdr _gueh, *gueh;
gueh = skb_header_pointer(skb, offset, sizeof(_gueh), &_gueh);
if (!gueh)
goto unk;
if (gueh->control != 0 || gueh->version != 0)
goto unk;
/* Later we can support also IPPROTO_IPV6 */
if (gueh->proto_ctype != IPPROTO_IPIP)
goto unk;
*proto = gueh->proto_ctype;
return sizeof(struct udphdr) + sizeof(struct guehdr) +
(gueh->hlen << 2);
}
unk:
return 0;
}
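/* Illustrative arithmetic, not part of the original file: the GUE hlen
 * field counts optional header data in 32-bit words, so with a
 * hypothetical hlen of 1 the length skipped above is
 *   sizeof(struct udphdr) = 8
 * + sizeof(struct guehdr) = 4
 * + (1 << 2)              = 4
 * for a total of 16 bytes, which moves the caller past the whole
 * UDP+GUE encapsulation to the inner IPIP payload.
 */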
/* Check the GRE tunnel and return its header length */
static int ipvs_gre_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
unsigned int offset, __u16 af,
const union nf_inet_addr *daddr, __u8 *proto)
{
struct gre_base_hdr _greh, *greh;
struct ip_vs_dest *dest;
greh = skb_header_pointer(skb, offset, sizeof(_greh), &_greh);
if (!greh)
goto unk;
dest = ip_vs_find_tunnel(ipvs, af, daddr, 0);
if (!dest)
goto unk;
if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
__be16 type;
/* Only support version 0 and C (csum) */
if ((greh->flags & ~GRE_CSUM) != 0)
goto unk;
type = greh->protocol;
/* Later we can support also IPPROTO_IPV6 */
if (type != htons(ETH_P_IP))
goto unk;
*proto = IPPROTO_IPIP;
return gre_calc_hlen(gre_flags_to_tnl_flags(greh->flags));
}
unk:
return 0;
}
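/* Illustrative note, not part of the original file: since only version 0
 * with an optional checksum is accepted above, gre_calc_hlen() can return
 * only two values here: 4 bytes for a bare GRE base header, or 8 bytes
 * when GRE_CSUM is set and the checksum/reserved word follows it.
 */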
/*
* Handle ICMP messages in the outside-to-inside direction (incoming).
* Find any that might be relevant, check against existing connections,
* forward to the right destination host if relevant.
* Currently handles error types - unreachable, quench, ttl exceeded.
*/
static int
ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
unsigned int hooknum)
{
struct iphdr *iph;
struct icmphdr _icmph, *ic;
struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
struct ip_vs_iphdr ciph;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
unsigned int offset, offset2, ihl, verdict;
bool tunnel, new_cp = false;
union nf_inet_addr *raddr;
char *outer_proto = "IPIP";
*related = 1;
/* reassemble IP fragments */
if (ip_is_fragment(ip_hdr(skb))) {
if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
return NF_STOLEN;
}
iph = ip_hdr(skb);
offset = ihl = iph->ihl * 4;
ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
if (ic == NULL)
return NF_DROP;
IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
ic->type, ntohs(icmp_id(ic)),
&iph->saddr, &iph->daddr);
/*
* Work through seeing if this is for us.
* These checks are supposed to be in an order that means easy
* things are checked first to speed up processing.... however
* this means that some packets will manage to get a long way
* down this stack and then be rejected, but that's life.
*/
if ((ic->type != ICMP_DEST_UNREACH) &&
(ic->type != ICMP_SOURCE_QUENCH) &&
(ic->type != ICMP_TIME_EXCEEDED)) {
*related = 0;
return NF_ACCEPT;
}
/* Now find the contained IP header */
offset += sizeof(_icmph);
cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
raddr = (union nf_inet_addr *)&cih->daddr;
/* Special case for errors for IPIP/UDP/GRE tunnel packets */
tunnel = false;
if (cih->protocol == IPPROTO_IPIP) {
struct ip_vs_dest *dest;
if (unlikely(cih->frag_off & htons(IP_OFFSET)))
return NF_ACCEPT;
/* Error for our IPIP must arrive at LOCAL_IN */
if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
return NF_ACCEPT;
dest = ip_vs_find_tunnel(ipvs, AF_INET, raddr, 0);
/* Only for known tunnel */
if (!dest || dest->tun_type != IP_VS_CONN_F_TUNNEL_TYPE_IPIP)
return NF_ACCEPT;
offset += cih->ihl * 4;
cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
tunnel = true;
} else if ((cih->protocol == IPPROTO_UDP || /* Can be UDP encap */
cih->protocol == IPPROTO_GRE) && /* Can be GRE encap */
/* Error for our tunnel must arrive at LOCAL_IN */
(skb_rtable(skb)->rt_flags & RTCF_LOCAL)) {
__u8 iproto;
int ulen;
/* Non-first fragment has no UDP/GRE header */
if (unlikely(cih->frag_off & htons(IP_OFFSET)))
return NF_ACCEPT;
offset2 = offset + cih->ihl * 4;
if (cih->protocol == IPPROTO_UDP) {
ulen = ipvs_udp_decap(ipvs, skb, offset2, AF_INET,
raddr, &iproto);
outer_proto = "UDP";
} else {
ulen = ipvs_gre_decap(ipvs, skb, offset2, AF_INET,
raddr, &iproto);
outer_proto = "GRE";
}
if (ulen > 0) {
/* Skip IP and UDP/GRE tunnel headers */
offset = offset2 + ulen;
/* Now we should be at the original IP header */
cih = skb_header_pointer(skb, offset, sizeof(_ciph),
&_ciph);
if (cih && cih->version == 4 && cih->ihl >= 5 &&
iproto == IPPROTO_IPIP)
tunnel = true;
else
return NF_ACCEPT;
}
}
pd = ip_vs_proto_data_get(ipvs, cih->protocol);
if (!pd)
return NF_ACCEPT;
pp = pd->pp;
/* Is the embedded protocol header present? */
if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
pp->dont_defrag))
return NF_ACCEPT;
IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
"Checking incoming ICMP for");
offset2 = offset;
ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !tunnel, &ciph);
offset = ciph.len;
/* The embedded headers contain source and dest in reverse order.
* For IPIP/UDP/GRE tunnel this is error for request, not for reply.
*/
cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
ipvs, AF_INET, skb, &ciph);
if (!cp) {
int v;
if (tunnel || !sysctl_schedule_icmp(ipvs))
return NF_ACCEPT;
if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
return v;
new_cp = true;
}
verdict = NF_DROP;
/* Ensure the checksum is correct */
if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
/* Failed checksum! */
IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
&iph->saddr);
goto out;
}
if (tunnel) {
__be32 info = ic->un.gateway;
__u8 type = ic->type;
__u8 code = ic->code;
/* Update the MTU */
if (ic->type == ICMP_DEST_UNREACH &&
ic->code == ICMP_FRAG_NEEDED) {
struct ip_vs_dest *dest = cp->dest;
u32 mtu = ntohs(ic->un.frag.mtu);
__be16 frag_off = cih->frag_off;
/* Strip outer IP and ICMP, go to IPIP/UDP/GRE header */
if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
goto ignore_tunnel;
offset2 -= ihl + sizeof(_icmph);
skb_reset_network_header(skb);
IP_VS_DBG(12, "ICMP for %s %pI4->%pI4: mtu=%u\n",
outer_proto, &ip_hdr(skb)->saddr,
&ip_hdr(skb)->daddr, mtu);
ipv4_update_pmtu(skb, ipvs->net, mtu, 0, 0);
/* Client uses PMTUD? */
if (!(frag_off & htons(IP_DF)))
goto ignore_tunnel;
/* Prefer the resulting PMTU */
if (dest) {
struct ip_vs_dest_dst *dest_dst;
dest_dst = rcu_dereference(dest->dest_dst);
if (dest_dst)
mtu = dst_mtu(dest_dst->dst_cache);
}
if (mtu > 68 + sizeof(struct iphdr))
mtu -= sizeof(struct iphdr);
info = htonl(mtu);
}
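/* Worked example with hypothetical numbers: if the router reported
 * mtu=1500 for the tunnel path and the client set DF, the outer IPv4
 * header (20 bytes) is subtracted so that the ICMP_FRAG_NEEDED relayed
 * to the client below advertises 1480, the largest inner packet that
 * still fits once re-encapsulated.
 */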
/* Strip outer IP, ICMP and IPIP/UDP/GRE, go to IP header of
* original request.
*/
if (pskb_pull(skb, offset2) == NULL)
goto ignore_tunnel;
skb_reset_network_header(skb);
IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
type, code, ntohl(info));
icmp_send(skb, type, code, info);
/* ICMP can be shorter but anyway, account it */
ip_vs_out_stats(cp, skb);
ignore_tunnel:
consume_skb(skb);
verdict = NF_STOLEN;
goto out;
}
/* do the statistics and put it back */
ip_vs_in_stats(cp, skb);
if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
IPPROTO_SCTP == cih->protocol)
offset += 2 * sizeof(__u16);
verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
out:
if (likely(!new_cp))
__ip_vs_conn_put(cp);
else
ip_vs_conn_put(cp);
return verdict;
}
#ifdef CONFIG_IP_VS_IPV6
static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
int *related, unsigned int hooknum,
struct ip_vs_iphdr *iph)
{
struct icmp6hdr _icmph, *ic;
struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
unsigned int offset, verdict;
bool new_cp = false;
*related = 1;
ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph);
if (ic == NULL)
return NF_DROP;
/*
* Work through seeing if this is for us.
* These checks are supposed to be in an order that means easy
* things are checked first to speed up processing.... however
* this means that some packets will manage to get a long way
* down this stack and then be rejected, but that's life.
*/
if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
*related = 0;
return NF_ACCEPT;
}
/* A fragment header before the ICMP header tells us that
* it's not an error message, since those can't be fragmented.
*/
if (iph->flags & IP6_FH_F_FRAG)
return NF_DROP;
IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
ic->icmp6_type, ntohs(icmpv6_id(ic)),
&iph->saddr, &iph->daddr);
offset = iph->len + sizeof(_icmph);
if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
return NF_ACCEPT;
pd = ip_vs_proto_data_get(ipvs, ciph.protocol);
if (!pd)
return NF_ACCEPT;
pp = pd->pp;
/* Cannot handle fragmented embedded protocol */
if (ciph.fragoffs)
return NF_ACCEPT;
IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
"Checking incoming ICMPv6 for");
/* The embedded headers contain source and dest in reverse order
* if not from localhost
*/
cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
ipvs, AF_INET6, skb, &ciph);
if (!cp) {
int v;
if (!sysctl_schedule_icmp(ipvs))
return NF_ACCEPT;
if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
return v;
new_cp = true;
}
/* VS/TUN, VS/DR and LOCALNODE just let it go */
if ((hooknum == NF_INET_LOCAL_OUT) &&
(IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
verdict = NF_ACCEPT;
goto out;
}
/* do the statistics and put it back */
ip_vs_in_stats(cp, skb);
/* Need to mangle contained IPv6 header in ICMPv6 packet */
offset = ciph.len;
if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
IPPROTO_SCTP == ciph.protocol)
offset += 2 * sizeof(__u16); /* Also mangle ports */
verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
out:
if (likely(!new_cp))
__ip_vs_conn_put(cp);
else
ip_vs_conn_put(cp);
return verdict;
}
#endif
/*
* Check if it's for virtual services, look it up,
* and send it on its way...
*/
static unsigned int
ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
{
struct netns_ipvs *ipvs = net_ipvs(state->net);
unsigned int hooknum = state->hook;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int ret, pkts;
struct sock *sk;
int af = state->pf;
/* Already marked as IPVS request or reply? */
if (skb->ipvs_property)
return NF_ACCEPT;
/*
* Big tappo:
* - remote client: only PACKET_HOST
* - route: used for struct net when skb->dev is unset
*/
if (unlikely((skb->pkt_type != PACKET_HOST &&
hooknum != NF_INET_LOCAL_OUT) ||
!skb_dst(skb))) {
ip_vs_fill_iph_skb(af, skb, false, &iph);
IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
" ignored in hook %u\n",
skb->pkt_type, iph.protocol,
IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
return NF_ACCEPT;
}
/* ipvs enabled in this netns ? */
if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
return NF_ACCEPT;
ip_vs_fill_iph_skb(af, skb, false, &iph);
/* Bad... Do not break raw sockets */
sk = skb_to_full_sk(skb);
if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
af == AF_INET)) {
if (sk->sk_family == PF_INET && inet_test_bit(NODEFRAG, sk))
return NF_ACCEPT;
}
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
int related;
int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
hooknum, &iph);
if (related)
return verdict;
}
} else
#endif
if (unlikely(iph.protocol == IPPROTO_ICMP)) {
int related;
int verdict = ip_vs_in_icmp(ipvs, skb, &related,
hooknum);
if (related)
return verdict;
}
/* Protocol supported? */
pd = ip_vs_proto_data_get(ipvs, iph.protocol);
if (unlikely(!pd)) {
/* The only way we'll see this packet again is if it's
* encapsulated, so mark it with ipvs_property=1 so we
* skip it if we're ignoring tunneled packets
*/
if (sysctl_ignore_tunneled(ipvs))
skb->ipvs_property = 1;
return NF_ACCEPT;
}
pp = pd->pp;
/*
* Check if the packet belongs to an existing connection entry
*/
cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
ipvs, af, skb, &iph);
if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
bool old_ct = false, resched = false;
if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
unlikely(!atomic_read(&cp->dest->weight))) {
resched = true;
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
} else if (conn_reuse_mode &&
is_new_conn_expected(cp, conn_reuse_mode)) {
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
if (!atomic_read(&cp->n_control)) {
resched = true;
} else {
/* Do not reschedule controlling connection
* that uses conntrack while it is still
* referenced by controlled connection(s).
*/
resched = !old_ct;
}
}
if (resched) {
if (!old_ct)
cp->flags &= ~IP_VS_CONN_F_NFCT;
if (!atomic_read(&cp->n_control))
ip_vs_conn_expire_now(cp);
__ip_vs_conn_put(cp);
if (old_ct)
return NF_DROP;
cp = NULL;
}
}
/* Check the server status */
if (cp && cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
/* the destination server is not available */
if (sysctl_expire_nodest_conn(ipvs)) {
bool old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
if (!old_ct)
cp->flags &= ~IP_VS_CONN_F_NFCT;
ip_vs_conn_expire_now(cp);
__ip_vs_conn_put(cp);
if (old_ct)
return NF_DROP;
cp = NULL;
} else {
__ip_vs_conn_put(cp);
return NF_DROP;
}
}
if (unlikely(!cp)) {
int v;
if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
return v;
}
IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
ip_vs_in_stats(cp, skb);
ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
if (cp->packet_xmit)
ret = cp->packet_xmit(skb, cp, pp, &iph);
/* do not touch skb anymore */
else {
IP_VS_DBG_RL("warning: packet_xmit is null");
ret = NF_ACCEPT;
}
/* Increase its packet counter and check whether it needs
* to be synchronized
*
* Sync the connection if it is about to close, to
* encourage the standby servers to update the connection's timeout
*
* For ONE_PKT let ip_vs_sync_conn() do the filter work.
*/
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
pkts = sysctl_sync_threshold(ipvs);
else
pkts = atomic_inc_return(&cp->in_pkts);
if (ipvs->sync_state & IP_VS_STATE_MASTER)
ip_vs_sync_conn(ipvs, cp, pkts);
else if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
/* increment is done inside ip_vs_sync_conn too */
atomic_inc(&cp->control->in_pkts);
ip_vs_conn_put(cp);
return ret;
}
/*
* It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
* related packets destined for 0.0.0.0/0.
* When a fwmark-based virtual service is used, such as a transparent
* cache cluster, TCP packets can be marked and routed to ip_vs_in,
* but ICMP destined for 0.0.0.0/0 cannot be easily marked and
* sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
* and send them to ip_vs_in_icmp.
*/
static unsigned int
ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct netns_ipvs *ipvs = net_ipvs(state->net);
int r;
/* ipvs enabled in this netns ? */
if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
return NF_ACCEPT;
if (state->pf == NFPROTO_IPV4) {
if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
return NF_ACCEPT;
#ifdef CONFIG_IP_VS_IPV6
} else {
struct ip_vs_iphdr iphdr;
ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
if (iphdr.protocol != IPPROTO_ICMPV6)
return NF_ACCEPT;
return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
#endif
}
return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
}
static const struct nf_hook_ops ip_vs_ops4[] = {
/* After packet filtering, change source only for VS/NAT */
{
.hook = ip_vs_out_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC - 2,
},
/* After packet filtering, forward packet through VS/DR, VS/TUN,
* or VS/NAT(change destination), so that filtering rules can be
* applied to IPVS. */
{
.hook = ip_vs_in_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC - 1,
},
/* Before ip_vs_in, change source only for VS/NAT */
{
.hook = ip_vs_out_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST + 1,
},
/* After mangle, schedule and forward local requests */
{
.hook = ip_vs_in_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST + 2,
},
/* After packet filtering (but before ip_vs_out_icmp), catch icmp
* destined for 0.0.0.0/0, which is for incoming IPVS connections */
{
.hook = ip_vs_forward_icmp,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = 99,
},
/* After packet filtering, change source only for VS/NAT */
{
.hook = ip_vs_out_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = 100,
},
};
#ifdef CONFIG_IP_VS_IPV6
static const struct nf_hook_ops ip_vs_ops6[] = {
/* After packet filtering, change source only for VS/NAT */
{
.hook = ip_vs_out_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC - 2,
},
/* After packet filtering, forward packet through VS/DR, VS/TUN,
* or VS/NAT(change destination), so that filtering rules can be
* applied to IPVS. */
{
.hook = ip_vs_in_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC - 1,
},
/* Before ip_vs_in, change source only for VS/NAT */
{
.hook = ip_vs_out_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 1,
},
/* After mangle, schedule and forward local requests */
{
.hook = ip_vs_in_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 2,
},
/* After packet filtering (but before ip_vs_out_icmp), catch icmp
* destined for 0.0.0.0/0, which is for incoming IPVS connections */
{
.hook = ip_vs_forward_icmp,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = 99,
},
/* After packet filtering, change source only for VS/NAT */
{
.hook = ip_vs_out_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = 100,
},
};
#endif
int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af)
{
const struct nf_hook_ops *ops;
unsigned int count;
unsigned int afmask;
int ret = 0;
if (af == AF_INET6) {
#ifdef CONFIG_IP_VS_IPV6
ops = ip_vs_ops6;
count = ARRAY_SIZE(ip_vs_ops6);
afmask = 2;
#else
return -EINVAL;
#endif
} else {
ops = ip_vs_ops4;
count = ARRAY_SIZE(ip_vs_ops4);
afmask = 1;
}
if (!(ipvs->hooks_afmask & afmask)) {
ret = nf_register_net_hooks(ipvs->net, ops, count);
if (ret >= 0)
ipvs->hooks_afmask |= afmask;
}
return ret;
}
void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af)
{
const struct nf_hook_ops *ops;
unsigned int count;
unsigned int afmask;
if (af == AF_INET6) {
#ifdef CONFIG_IP_VS_IPV6
ops = ip_vs_ops6;
count = ARRAY_SIZE(ip_vs_ops6);
afmask = 2;
#else
return;
#endif
} else {
ops = ip_vs_ops4;
count = ARRAY_SIZE(ip_vs_ops4);
afmask = 1;
}
if (ipvs->hooks_afmask & afmask) {
nf_unregister_net_hooks(ipvs->net, ops, count);
ipvs->hooks_afmask &= ~afmask;
}
}
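/* Illustrative note: hooks_afmask is a small bitmap, bit 0 (1) for the
 * IPv4 hook set and bit 1 (2) for IPv6, so a netns with both families
 * active has hooks_afmask == 3 and each family can be registered or
 * unregistered independently as services come and go.
 */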
/*
* Initialize IP Virtual Server netns mem.
*/
static int __net_init __ip_vs_init(struct net *net)
{
struct netns_ipvs *ipvs;
ipvs = net_generic(net, ip_vs_net_id);
if (ipvs == NULL)
return -ENOMEM;
/* Hold the beast until a service is registered */
ipvs->enable = 0;
ipvs->net = net;
/* Counters used for creating unique names */
ipvs->gen = atomic_read(&ipvs_netns_cnt);
atomic_inc(&ipvs_netns_cnt);
net->ipvs = ipvs;
if (ip_vs_estimator_net_init(ipvs) < 0)
goto estimator_fail;
if (ip_vs_control_net_init(ipvs) < 0)
goto control_fail;
if (ip_vs_protocol_net_init(ipvs) < 0)
goto protocol_fail;
if (ip_vs_app_net_init(ipvs) < 0)
goto app_fail;
if (ip_vs_conn_net_init(ipvs) < 0)
goto conn_fail;
if (ip_vs_sync_net_init(ipvs) < 0)
goto sync_fail;
return 0;
/*
* Error handling
*/
sync_fail:
ip_vs_conn_net_cleanup(ipvs);
conn_fail:
ip_vs_app_net_cleanup(ipvs);
app_fail:
ip_vs_protocol_net_cleanup(ipvs);
protocol_fail:
ip_vs_control_net_cleanup(ipvs);
control_fail:
ip_vs_estimator_net_cleanup(ipvs);
estimator_fail:
net->ipvs = NULL;
return -ENOMEM;
}
static void __net_exit __ip_vs_cleanup_batch(struct list_head *net_list)
{
struct netns_ipvs *ipvs;
struct net *net;
ip_vs_service_nets_cleanup(net_list); /* ip_vs_flush() with locks */
list_for_each_entry(net, net_list, exit_list) {
ipvs = net_ipvs(net);
ip_vs_conn_net_cleanup(ipvs);
ip_vs_app_net_cleanup(ipvs);
ip_vs_protocol_net_cleanup(ipvs);
ip_vs_control_net_cleanup(ipvs);
ip_vs_estimator_net_cleanup(ipvs);
IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
net->ipvs = NULL;
}
}
static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
{
struct netns_ipvs *ipvs;
struct net *net;
list_for_each_entry(net, net_list, exit_list) {
ipvs = net_ipvs(net);
ip_vs_unregister_hooks(ipvs, AF_INET);
ip_vs_unregister_hooks(ipvs, AF_INET6);
ipvs->enable = 0; /* Disable packet reception */
smp_wmb();
ip_vs_sync_net_cleanup(ipvs);
}
}
static struct pernet_operations ipvs_core_ops = {
.init = __ip_vs_init,
.exit_batch = __ip_vs_cleanup_batch,
.id = &ip_vs_net_id,
.size = sizeof(struct netns_ipvs),
};
static struct pernet_operations ipvs_core_dev_ops = {
.exit_batch = __ip_vs_dev_cleanup_batch,
};
/*
* Initialize IP Virtual Server
*/
static int __init ip_vs_init(void)
{
int ret;
ret = ip_vs_control_init();
if (ret < 0) {
pr_err("can't setup control.\n");
goto exit;
}
ip_vs_protocol_init();
ret = ip_vs_conn_init();
if (ret < 0) {
pr_err("can't setup connection table.\n");
goto cleanup_protocol;
}
ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
if (ret < 0)
goto cleanup_conn;
ret = register_pernet_device(&ipvs_core_dev_ops);
if (ret < 0)
goto cleanup_sub;
ret = ip_vs_register_nl_ioctl();
if (ret < 0) {
pr_err("can't register netlink/ioctl.\n");
goto cleanup_dev;
}
pr_info("ipvs loaded.\n");
return ret;
cleanup_dev:
unregister_pernet_device(&ipvs_core_dev_ops);
cleanup_sub:
unregister_pernet_subsys(&ipvs_core_ops);
cleanup_conn:
ip_vs_conn_cleanup();
cleanup_protocol:
ip_vs_protocol_cleanup();
ip_vs_control_cleanup();
exit:
return ret;
}
static void __exit ip_vs_cleanup(void)
{
ip_vs_unregister_nl_ioctl();
unregister_pernet_device(&ipvs_core_dev_ops);
unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
ip_vs_conn_cleanup();
ip_vs_protocol_cleanup();
ip_vs_control_cleanup();
/* common rcu_barrier() used by:
* - ip_vs_control_cleanup()
*/
rcu_barrier();
pr_info("ipvs unloaded.\n");
}
module_init(ip_vs_init);
module_exit(ip_vs_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ip_vs_proto_ah_esp.c: AH/ESP IPSec load balancing support for IPVS
*
* Authors: Julian Anastasov <[email protected]>, February 2002
* Wensong Zhang <[email protected]>
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip_vs.h>
/* TODO:
struct isakmp_hdr {
__u8 icookie[8];
__u8 rcookie[8];
__u8 np;
__u8 version;
__u8 xchgtype;
__u8 flags;
__u32 msgid;
__u32 length;
};
*/
#define PORT_ISAKMP 500
static void
ah_esp_conn_fill_param_proto(struct netns_ipvs *ipvs, int af,
const struct ip_vs_iphdr *iph,
struct ip_vs_conn_param *p)
{
if (likely(!ip_vs_iph_inverse(iph)))
ip_vs_conn_fill_param(ipvs, af, IPPROTO_UDP,
&iph->saddr, htons(PORT_ISAKMP),
&iph->daddr, htons(PORT_ISAKMP), p);
else
ip_vs_conn_fill_param(ipvs, af, IPPROTO_UDP,
&iph->daddr, htons(PORT_ISAKMP),
&iph->saddr, htons(PORT_ISAKMP), p);
}
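/* Illustrative note: AH and ESP carry no port numbers, so IPVS keys
 * their connections on the peers' ISAKMP control traffic instead.
 * An ESP packet from 10.0.0.1 to 192.168.0.1 (hypothetical addresses)
 * is looked up as if it were UDP 10.0.0.1:500 -> 192.168.0.1:500,
 * i.e. the connection created for the IKE exchange.
 */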
static struct ip_vs_conn *
ah_esp_conn_in_get(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
ah_esp_conn_fill_param_proto(ipvs, af, iph, &p);
cp = ip_vs_conn_in_get(&p);
if (!cp) {
/*
* We are not sure if the packet is from our
* service, so our conn_schedule hook should return NF_ACCEPT
*/
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
"%s%s %s->%s\n",
ip_vs_iph_icmp(iph) ? "ICMP+" : "",
ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
return cp;
}
static struct ip_vs_conn *
ah_esp_conn_out_get(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
ah_esp_conn_fill_param_proto(ipvs, af, iph, &p);
cp = ip_vs_conn_out_get(&p);
if (!cp) {
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
"%s%s %s->%s\n",
ip_vs_iph_icmp(iph) ? "ICMP+" : "",
ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
return cp;
}
static int
ah_esp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
struct ip_vs_iphdr *iph)
{
/*
* AH/ESP is only related traffic. Pass the packet to IP stack.
*/
*verdict = NF_ACCEPT;
return 0;
}
#ifdef CONFIG_IP_VS_PROTO_AH
struct ip_vs_protocol ip_vs_protocol_ah = {
.name = "AH",
.protocol = IPPROTO_AH,
.num_states = 1,
.dont_defrag = 1,
.init = NULL,
.exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
.app_conn_bind = NULL,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
};
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
struct ip_vs_protocol ip_vs_protocol_esp = {
.name = "ESP",
.protocol = IPPROTO_ESP,
.num_states = 1,
.dont_defrag = 1,
.init = NULL,
.exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
.app_conn_bind = NULL,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
};
#endif
| linux-master | net/netfilter/ipvs/ip_vs_proto_ah_esp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_app.c: Application module support for IPVS
*
* Authors: Wensong Zhang <[email protected]>
*
* Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
* is that ip_vs_app module handles the reverse direction (incoming requests
* and outgoing responses).
*
* IP_MASQ_APP application masquerading module
*
* Author: Juan Jose Ciarlante, <[email protected]>
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <net/ip_vs.h>
EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);
static DEFINE_MUTEX(__ip_vs_app_mutex);
/*
* Get an ip_vs_app object
*/
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
return try_module_get(app->module);
}
static inline void ip_vs_app_put(struct ip_vs_app *app)
{
module_put(app->module);
}
static void ip_vs_app_inc_destroy(struct ip_vs_app *inc)
{
kfree(inc->timeout_table);
kfree(inc);
}
static void ip_vs_app_inc_rcu_free(struct rcu_head *head)
{
struct ip_vs_app *inc = container_of(head, struct ip_vs_app, rcu_head);
ip_vs_app_inc_destroy(inc);
}
/*
* Allocate/initialize app incarnation and register it in proto apps.
*/
static int
ip_vs_app_inc_new(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
__u16 port)
{
struct ip_vs_protocol *pp;
struct ip_vs_app *inc;
int ret;
if (!(pp = ip_vs_proto_get(proto)))
return -EPROTONOSUPPORT;
if (!pp->unregister_app)
return -EOPNOTSUPP;
inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
if (!inc)
return -ENOMEM;
INIT_LIST_HEAD(&inc->p_list);
INIT_LIST_HEAD(&inc->incs_list);
inc->app = app;
inc->port = htons(port);
atomic_set(&inc->usecnt, 0);
if (app->timeouts) {
inc->timeout_table =
ip_vs_create_timeout_table(app->timeouts,
app->timeouts_size);
if (!inc->timeout_table) {
ret = -ENOMEM;
goto out;
}
}
ret = pp->register_app(ipvs, inc);
if (ret)
goto out;
list_add(&inc->a_list, &app->incs_list);
IP_VS_DBG(9, "%s App %s:%u registered\n",
pp->name, inc->name, ntohs(inc->port));
return 0;
out:
ip_vs_app_inc_destroy(inc);
return ret;
}
/*
* Release app incarnation
*/
static void
ip_vs_app_inc_release(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
struct ip_vs_protocol *pp;
if (!(pp = ip_vs_proto_get(inc->protocol)))
return;
if (pp->unregister_app)
pp->unregister_app(ipvs, inc);
IP_VS_DBG(9, "%s App %s:%u unregistered\n",
pp->name, inc->name, ntohs(inc->port));
list_del(&inc->a_list);
call_rcu(&inc->rcu_head, ip_vs_app_inc_rcu_free);
}
/*
* Get reference to app inc (only called from softirq)
*
*/
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
int result;
result = ip_vs_app_get(inc->app);
if (result)
atomic_inc(&inc->usecnt);
return result;
}
/*
* Put the app inc (only called from timer or net softirq)
*/
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
atomic_dec(&inc->usecnt);
ip_vs_app_put(inc->app);
}
/*
* Register an application incarnation in protocol applications
*/
int
register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
__u16 port)
{
int result;
mutex_lock(&__ip_vs_app_mutex);
result = ip_vs_app_inc_new(ipvs, app, proto, port);
mutex_unlock(&__ip_vs_app_mutex);
return result;
}
/* Register application for netns */
struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
{
struct ip_vs_app *a;
int err = 0;
mutex_lock(&__ip_vs_app_mutex);
/* increase the module use count */
if (!ip_vs_use_count_inc()) {
err = -ENOENT;
goto out_unlock;
}
list_for_each_entry(a, &ipvs->app_list, a_list) {
if (!strcmp(app->name, a->name)) {
err = -EEXIST;
/* decrease the module use count */
ip_vs_use_count_dec();
goto out_unlock;
}
}
a = kmemdup(app, sizeof(*app), GFP_KERNEL);
if (!a) {
err = -ENOMEM;
/* decrease the module use count */
ip_vs_use_count_dec();
goto out_unlock;
}
INIT_LIST_HEAD(&a->incs_list);
list_add(&a->a_list, &ipvs->app_list);
out_unlock:
mutex_unlock(&__ip_vs_app_mutex);
return err ? ERR_PTR(err) : a;
}
/*
* ip_vs_app unregistration routine.
* We are sure there are no app incarnations attached to services.
* Caller should use synchronize_rcu() or rcu_barrier().
*/
void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
{
struct ip_vs_app *a, *anxt, *inc, *nxt;
mutex_lock(&__ip_vs_app_mutex);
list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
if (app && strcmp(app->name, a->name))
continue;
list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
ip_vs_app_inc_release(ipvs, inc);
}
list_del(&a->a_list);
kfree(a);
/* decrease the module use count */
ip_vs_use_count_dec();
}
mutex_unlock(&__ip_vs_app_mutex);
}
/*
* Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
*/
int ip_vs_bind_app(struct ip_vs_conn *cp,
struct ip_vs_protocol *pp)
{
return pp->app_conn_bind(cp);
}
/*
* Unbind cp from application incarnation (called by cp destructor)
*/
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
struct ip_vs_app *inc = cp->app;
if (!inc)
return;
if (inc->unbind_conn)
inc->unbind_conn(inc, cp);
if (inc->done_conn)
inc->done_conn(inc, cp);
ip_vs_app_inc_put(inc);
cp->app = NULL;
}
/*
* Fixes th->seq based on ip_vs_seq info.
*/
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
__u32 seq = ntohl(th->seq);
/*
* Adjust seq with delta-offset for all packets after
* the most recent resized pkt seq and with previous_delta offset
* for all packets before most recent resized pkt seq.
*/
if (vseq->delta || vseq->previous_delta) {
if(after(seq, vseq->init_seq)) {
th->seq = htonl(seq + vseq->delta);
IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
__func__, vseq->delta);
} else {
th->seq = htonl(seq + vseq->previous_delta);
IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n",
__func__, vseq->previous_delta);
}
}
}
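/* Worked example with hypothetical numbers: if an application helper
 * grew an earlier payload by 4 bytes at sequence 1000, the state is
 * init_seq=1000, delta=4, previous_delta=0. A later segment with
 * seq 1500 is rewritten to 1504 (delta applied), while a retransmission
 * with seq 900, sent before the resized packet, keeps its old numbering
 * and gets previous_delta (0) instead.
 */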
/*
* Fixes th->ack_seq based on ip_vs_seq info.
*/
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
__u32 ack_seq = ntohl(th->ack_seq);
/*
* Adjust ack_seq with delta-offset for packets after
* the most recent resized pkt has caused a shift;
* for packets before the most recent resized pkt, use previous_delta
*/
if (vseq->delta || vseq->previous_delta) {
/* since ack_seq is the number of the octet that the peer expects
to receive next, compare it with init_seq+delta */
if(after(ack_seq, vseq->init_seq+vseq->delta)) {
th->ack_seq = htonl(ack_seq - vseq->delta);
IP_VS_DBG(9, "%s(): subtracted delta "
"(%d) from ack_seq\n", __func__, vseq->delta);
} else {
th->ack_seq = htonl(ack_seq - vseq->previous_delta);
IP_VS_DBG(9, "%s(): subtracted "
"previous_delta (%d) from ack_seq\n",
__func__, vseq->previous_delta);
}
}
}
/*
* Updates ip_vs_seq if pkt has been resized
* Assumes already checked proto==IPPROTO_TCP and diff!=0.
*/
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
unsigned int flag, __u32 seq, int diff)
{
/* spinlock is to keep updating cp->flags atomic */
spin_lock_bh(&cp->lock);
if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
vseq->previous_delta = vseq->delta;
vseq->delta += diff;
vseq->init_seq = seq;
cp->flags |= flag;
}
spin_unlock_bh(&cp->lock);
}
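/* Worked example with hypothetical numbers: after a first +4 byte
 * resize at seq 1000 the state is init_seq=1000, delta=4,
 * previous_delta=0. If a second packet at seq 2000 is later grown by
 * 6 bytes, vs_seq_update() shifts delta into previous_delta and
 * accumulates: init_seq=2000, delta=10, previous_delta=4. Segments
 * between the two resize points are then fixed with previous_delta,
 * segments after seq 2000 with the full delta.
 */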
static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
struct ip_vs_app *app,
struct ip_vs_iphdr *ipvsh)
{
int diff;
const unsigned int tcp_offset = ip_hdrlen(skb);
struct tcphdr *th;
__u32 seq;
if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
return 0;
th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
/*
* Remember seq number in case this pkt gets resized
*/
seq = ntohl(th->seq);
/*
* Fix seq stuff if flagged as so.
*/
if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
vs_fix_seq(&cp->out_seq, th);
if (cp->flags & IP_VS_CONN_F_IN_SEQ)
vs_fix_ack_seq(&cp->in_seq, th);
/*
* Call private output hook function
*/
if (app->pkt_out == NULL)
return 1;
if (!app->pkt_out(app, cp, skb, &diff, ipvsh))
return 0;
/*
* Update ip_vs seq stuff if len has changed.
*/
if (diff != 0)
vs_seq_update(cp, &cp->out_seq,
IP_VS_CONN_F_OUT_SEQ, seq, diff);
return 1;
}
/*
* Output pkt hook. Will call the bound ip_vs_app specific function;
* called by the ipvs packet handler, assumes previously checked cp!=NULL,
* returns false if it can't handle the packet (oom).
*/
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
struct ip_vs_iphdr *ipvsh)
{
struct ip_vs_app *app;
/*
* check if application module is bound to
* this ip_vs_conn.
*/
if ((app = cp->app) == NULL)
return 1;
/* TCP is complicated */
if (cp->protocol == IPPROTO_TCP)
return app_tcp_pkt_out(cp, skb, app, ipvsh);
/*
* Call private output hook function
*/
if (app->pkt_out == NULL)
return 1;
return app->pkt_out(app, cp, skb, NULL, ipvsh);
}
static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
struct ip_vs_app *app,
struct ip_vs_iphdr *ipvsh)
{
int diff;
const unsigned int tcp_offset = ip_hdrlen(skb);
struct tcphdr *th;
__u32 seq;
if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
return 0;
th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
/*
* Remember seq number in case this pkt gets resized
*/
seq = ntohl(th->seq);
/*
* Fix seq stuff if flagged as so.
*/
if (cp->flags & IP_VS_CONN_F_IN_SEQ)
vs_fix_seq(&cp->in_seq, th);
if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
vs_fix_ack_seq(&cp->out_seq, th);
/*
* Call private input hook function
*/
if (app->pkt_in == NULL)
return 1;
if (!app->pkt_in(app, cp, skb, &diff, ipvsh))
return 0;
/*
* Update ip_vs seq stuff if len has changed.
*/
if (diff != 0)
vs_seq_update(cp, &cp->in_seq,
IP_VS_CONN_F_IN_SEQ, seq, diff);
return 1;
}
/*
* Input pkt hook. Will call bound ip_vs_app specific function
* called by ipvs packet handler, assumes previously checked cp!=NULL.
* returns false if can't handle packet (oom).
*/
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
struct ip_vs_iphdr *ipvsh)
{
struct ip_vs_app *app;
/*
* check if application module is bound to
* this ip_vs_conn.
*/
if ((app = cp->app) == NULL)
return 1;
/* TCP is complicated */
if (cp->protocol == IPPROTO_TCP)
return app_tcp_pkt_in(cp, skb, app, ipvsh);
/*
* Call private input hook function
*/
if (app->pkt_in == NULL)
return 1;
return app->pkt_in(app, cp, skb, NULL, ipvsh);
}
#ifdef CONFIG_PROC_FS
/*
* /proc/net/ip_vs_app entry function
*/
static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
{
struct ip_vs_app *app, *inc;
list_for_each_entry(app, &ipvs->app_list, a_list) {
list_for_each_entry(inc, &app->incs_list, a_list) {
if (pos-- == 0)
return inc;
}
}
return NULL;
}
static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
struct netns_ipvs *ipvs = net_ipvs(net);
mutex_lock(&__ip_vs_app_mutex);
return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_app *inc, *app;
struct list_head *e;
struct net *net = seq_file_net(seq);
struct netns_ipvs *ipvs = net_ipvs(net);
++*pos;
if (v == SEQ_START_TOKEN)
return ip_vs_app_idx(ipvs, 0);
inc = v;
app = inc->app;
if ((e = inc->a_list.next) != &app->incs_list)
return list_entry(e, struct ip_vs_app, a_list);
/* go on to next application */
for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
app = list_entry(e, struct ip_vs_app, a_list);
list_for_each_entry(inc, &app->incs_list, a_list) {
return inc;
}
}
return NULL;
}
static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
mutex_unlock(&__ip_vs_app_mutex);
}
static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "prot port usecnt name\n");
else {
const struct ip_vs_app *inc = v;
seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
ip_vs_proto_name(inc->protocol),
ntohs(inc->port),
atomic_read(&inc->usecnt),
inc->name);
}
return 0;
}
static const struct seq_operations ip_vs_app_seq_ops = {
.start = ip_vs_app_seq_start,
.next = ip_vs_app_seq_next,
.stop = ip_vs_app_seq_stop,
.show = ip_vs_app_seq_show,
};
#endif
int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
{
INIT_LIST_HEAD(&ipvs->app_list);
#ifdef CONFIG_PROC_FS
if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net,
&ip_vs_app_seq_ops,
sizeof(struct seq_net_private)))
return -ENOMEM;
#endif
return 0;
}
void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
{
unregister_ip_vs_app(ipvs, NULL /* all */);
#ifdef CONFIG_PROC_FS
remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
#endif
}
| linux-master | net/netfilter/ipvs/ip_vs_app.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Overflow-Connection Scheduling module
*
* Authors: Raducu Deaconu <[email protected]>
*
* Scheduler implements "overflow" load balancing according to the number of
* active connections: it will keep all connections on the node with the
* highest weight and overflow to the next node if the number of connections
* exceeds that node's weight.
* Note that this scheduler might not be suitable for UDP because it only uses
* active connections.
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
/* OVF Connection scheduling */
static struct ip_vs_dest *
ip_vs_ovf_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *h = NULL;
int hw = 0, w;
IP_VS_DBG(6, "ip_vs_ovf_schedule(): Scheduling...\n");
/* select the node with highest weight, go to next in line if active
* connections exceed weight
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
w = atomic_read(&dest->weight);
if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
atomic_read(&dest->activeconns) > w ||
w == 0)
continue;
if (!h || w > hw) {
h = dest;
hw = w;
}
}
if (h) {
IP_VS_DBG_BUF(6, "OVF: server %s:%u active %d w %d\n",
IP_VS_DBG_ADDR(h->af, &h->addr),
ntohs(h->port),
atomic_read(&h->activeconns),
atomic_read(&h->weight));
return h;
}
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
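/* Illustrative sketch, not part of the original module: the same
 * "overflow" selection rule applied to a plain array of hypothetical
 * weight/activeconns pairs, to make the policy above concrete. The
 * highest-weight usable server wins until its active connections
 * exceed its weight; only then does traffic overflow to the next one.
 */
struct ovf_example_node {
	int weight;		/* configured weight */
	int activeconns;	/* current active connections */
	int overloaded;		/* stands in for IP_VS_DEST_F_OVERLOAD */
};

static inline int ovf_example_pick(const struct ovf_example_node *n, int count)
{
	int i, best = -1, best_w = 0;

	for (i = 0; i < count; i++) {
		if (n[i].overloaded || n[i].weight == 0 ||
		    n[i].activeconns > n[i].weight)
			continue;
		if (best < 0 || n[i].weight > best_w) {
			best = i;
			best_w = n[i].weight;
		}
	}
	return best;	/* index of the chosen server, or -1 if none usable */
}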
static struct ip_vs_scheduler ip_vs_ovf_scheduler = {
.name = "ovf",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_ovf_scheduler.n_list),
.schedule = ip_vs_ovf_schedule,
};
static int __init ip_vs_ovf_init(void)
{
return register_ip_vs_scheduler(&ip_vs_ovf_scheduler);
}
static void __exit ip_vs_ovf_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_ovf_scheduler);
synchronize_rcu();
}
module_init(ip_vs_ovf_init);
module_exit(ip_vs_ovf_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_ovf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Destination Hashing scheduling module
*
* Authors: Wensong Zhang <[email protected]>
*
* Inspired by the consistent hashing scheduler patch from
* Thomas Proell <[email protected]>
*
* Changes:
*/
/*
* The dh algorithm is to select server by the hash key of destination IP
* address. The pseudo code is as follows:
*
* n <- servernode[dest_ip];
* if (n is dead) OR
* (n is overloaded) OR (n.weight <= 0) then
* return NULL;
*
* return n;
*
* Note that servernode is a 256-bucket hash table that maps the hash
* index derived from the packet destination IP address to the current server
* array. If the dh scheduler is used in a cache cluster, it is good to
* combine it with the cache_bypass feature. When the statically assigned
* server is dead or overloaded, the load balancer can bypass the cache
* server and send requests to the original server directly.
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/hash.h>
#include <net/ip_vs.h>
/*
* IPVS DH bucket
*/
struct ip_vs_dh_bucket {
struct ip_vs_dest __rcu *dest; /* real server (cache) */
};
/*
* for IPVS DH entry hash table
*/
#ifndef CONFIG_IP_VS_DH_TAB_BITS
#define CONFIG_IP_VS_DH_TAB_BITS 8
#endif
#define IP_VS_DH_TAB_BITS CONFIG_IP_VS_DH_TAB_BITS
#define IP_VS_DH_TAB_SIZE (1 << IP_VS_DH_TAB_BITS)
#define IP_VS_DH_TAB_MASK (IP_VS_DH_TAB_SIZE - 1)
struct ip_vs_dh_state {
struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE];
struct rcu_head rcu_head;
};
/*
* Returns hash value for IPVS DH entry
*/
static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
{
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return hash_32(ntohl(addr_fold), IP_VS_DH_TAB_BITS);
}
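/* Illustrative note: with the default IP_VS_DH_TAB_BITS of 8 the table
 * has 256 buckets, so the key above is the destination address folded
 * to 32 bits (the four IPv6 words XORed together for AF_INET6) and then
 * reduced by hash_32() to a bucket index in [0, 255]. Every packet for
 * the same destination IP therefore maps to the same bucket and hence
 * to the same cached real server.
 */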
/*
* Get ip_vs_dest associated with supplied parameters.
*/
static inline struct ip_vs_dest *
ip_vs_dh_get(int af, struct ip_vs_dh_state *s, const union nf_inet_addr *addr)
{
return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest);
}
/*
* Assign all the hash buckets of the specified table with the service.
*/
static int
ip_vs_dh_reassign(struct ip_vs_dh_state *s, struct ip_vs_service *svc)
{
int i;
struct ip_vs_dh_bucket *b;
struct list_head *p;
struct ip_vs_dest *dest;
bool empty;
b = &s->buckets[0];
p = &svc->destinations;
empty = list_empty(p);
for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
dest = rcu_dereference_protected(b->dest, 1);
if (dest)
ip_vs_dest_put(dest);
if (empty)
RCU_INIT_POINTER(b->dest, NULL);
else {
if (p == &svc->destinations)
p = p->next;
dest = list_entry(p, struct ip_vs_dest, n_list);
ip_vs_dest_hold(dest);
RCU_INIT_POINTER(b->dest, dest);
p = p->next;
}
b++;
}
return 0;
}
/*
* Flush all the hash buckets of the specified table.
*/
static void ip_vs_dh_flush(struct ip_vs_dh_state *s)
{
int i;
struct ip_vs_dh_bucket *b;
struct ip_vs_dest *dest;
b = &s->buckets[0];
for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
dest = rcu_dereference_protected(b->dest, 1);
if (dest) {
ip_vs_dest_put(dest);
RCU_INIT_POINTER(b->dest, NULL);
}
b++;
}
}
static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
{
struct ip_vs_dh_state *s;
/* allocate the DH table for this service */
s = kzalloc(sizeof(struct ip_vs_dh_state), GFP_KERNEL);
if (s == NULL)
return -ENOMEM;
svc->sched_data = s;
IP_VS_DBG(6, "DH hash table (memory=%zdbytes) allocated for "
"current service\n",
sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE);
/* assign the hash buckets with current dests */
ip_vs_dh_reassign(s, svc);
return 0;
}
static void ip_vs_dh_done_svc(struct ip_vs_service *svc)
{
struct ip_vs_dh_state *s = svc->sched_data;
/* got to clean up hash buckets here */
ip_vs_dh_flush(s);
/* release the table itself */
kfree_rcu(s, rcu_head);
IP_VS_DBG(6, "DH hash table (memory=%zdbytes) released\n",
sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE);
}
static int ip_vs_dh_dest_changed(struct ip_vs_service *svc,
struct ip_vs_dest *dest)
{
struct ip_vs_dh_state *s = svc->sched_data;
/* assign the hash buckets with the updated service */
ip_vs_dh_reassign(s, svc);
return 0;
}
/*
* If the dest flags is set with IP_VS_DEST_F_OVERLOAD,
* consider that the server is overloaded here.
*/
static inline int is_overloaded(struct ip_vs_dest *dest)
{
return dest->flags & IP_VS_DEST_F_OVERLOAD;
}
/*
* Destination hashing scheduling
*/
static struct ip_vs_dest *
ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest;
struct ip_vs_dh_state *s;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
s = (struct ip_vs_dh_state *) svc->sched_data;
dest = ip_vs_dh_get(svc->af, s, &iph->daddr);
if (!dest
|| !(dest->flags & IP_VS_DEST_F_AVAILABLE)
|| atomic_read(&dest->weight) <= 0
|| is_overloaded(dest)) {
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
IP_VS_DBG_BUF(6, "DH: destination IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, &iph->daddr),
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port));
return dest;
}
/*
* IPVS DH Scheduler structure
*/
static struct ip_vs_scheduler ip_vs_dh_scheduler =
{
.name = "dh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
.init_service = ip_vs_dh_init_svc,
.done_service = ip_vs_dh_done_svc,
.add_dest = ip_vs_dh_dest_changed,
.del_dest = ip_vs_dh_dest_changed,
.schedule = ip_vs_dh_schedule,
};
static int __init ip_vs_dh_init(void)
{
return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
}
static void __exit ip_vs_dh_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_dh_scheduler);
synchronize_rcu();
}
module_init(ip_vs_dh_init);
module_exit(ip_vs_dh_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_dh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Locality-Based Least-Connection scheduling module
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes:
* Martin Hamilton : fixed the terrible locking bugs
* *lock(tbl->lock) ==> *lock(&tbl->lock)
* Wensong Zhang : fixed the uninitialized tbl->lock bug
* Wensong Zhang : added doing full expiration check to
* collect stale entries of 24+ hours when
* no partial expire check in a half hour
* Julian Anastasov : replaced del_timer call with del_timer_sync
* to avoid the possible race between timer
* handler and del_timer thread in SMP
*/
/*
* The lblc algorithm is as follows (pseudo code):
*
* if cachenode[dest_ip] is null then
* n, cachenode[dest_ip] <- {weighted least-conn node};
* else
* n <- cachenode[dest_ip];
* if (n is dead) OR
* (n.conns>n.weight AND
* there is a node m with m.conns<m.weight/2) then
* n, cachenode[dest_ip] <- {weighted least-conn node};
*
* return n;
*
* Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
* me to write this module.
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/hash.h>
/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/ip_vs.h>
/*
* It is for garbage collection of stale IPVS lblc entries,
* when the table is full.
*/
#define CHECK_EXPIRE_INTERVAL (60*HZ)
#define ENTRY_TIMEOUT (6*60*HZ)
#define DEFAULT_EXPIRATION (24*60*60*HZ)
/*
* It is for full expiration check.
* When there is no partial expiration check (garbage collection)
* in a half hour, do a full expiration check to collect stale
* entries that haven't been touched for a day.
*/
#define COUNT_FOR_FULL_EXPIRATION 30
/*
* for IPVS lblc entry hash table
*/
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS 10
#endif
#define IP_VS_LBLC_TAB_BITS CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK (IP_VS_LBLC_TAB_SIZE - 1)
/*
* IPVS lblc entry represents an association between destination
* IP address and its destination server
*/
struct ip_vs_lblc_entry {
struct hlist_node list;
int af; /* address family */
union nf_inet_addr addr; /* destination IP address */
struct ip_vs_dest *dest; /* real server (cache) */
unsigned long lastuse; /* last used time */
struct rcu_head rcu_head;
};
/*
* IPVS lblc hash table
*/
struct ip_vs_lblc_table {
struct rcu_head rcu_head;
struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
struct timer_list periodic_timer; /* collect stale entries */
struct ip_vs_service *svc; /* pointer back to service */
atomic_t entries; /* number of entries */
int max_size; /* maximum size of entries */
int rover; /* rover for expire check */
int counter; /* counter for no expire */
bool dead;
};
/*
* IPVS LBLC sysctl table
*/
#ifdef CONFIG_SYSCTL
static struct ctl_table vs_vars_table[] = {
{
.procname = "lblc_expiration",
.data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ }
};
#endif
static void ip_vs_lblc_rcu_free(struct rcu_head *head)
{
struct ip_vs_lblc_entry *en = container_of(head,
struct ip_vs_lblc_entry,
rcu_head);
ip_vs_dest_put_and_free(en->dest);
kfree(en);
}
static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
{
hlist_del_rcu(&en->list);
call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
}
/*
* Returns hash value for IPVS LBLC entry
*/
static inline unsigned int
ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
{
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return hash_32(ntohl(addr_fold), IP_VS_LBLC_TAB_BITS);
}
/*
* Hash (add) an entry in the ip_vs_lblc_table.
* Called under the service sched_lock; does not fail.
*/
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);
hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
atomic_inc(&tbl->entries);
}
/* Get ip_vs_lblc_entry associated with supplied parameters. */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
const union nf_inet_addr *addr)
{
unsigned int hash = ip_vs_lblc_hashkey(af, addr);
struct ip_vs_lblc_entry *en;
hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
if (ip_vs_addr_equal(af, &en->addr, addr))
return en;
return NULL;
}
/*
* Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
* address to a server. Called under spin lock.
*/
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
u16 af, struct ip_vs_dest *dest)
{
struct ip_vs_lblc_entry *en;
en = ip_vs_lblc_get(af, tbl, daddr);
if (en) {
if (en->dest == dest)
return en;
ip_vs_lblc_del(en);
}
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en)
return NULL;
en->af = af;
ip_vs_addr_copy(af, &en->addr, daddr);
en->lastuse = jiffies;
ip_vs_dest_hold(dest);
en->dest = dest;
ip_vs_lblc_hash(tbl, en);
return en;
}
/*
* Flush all the entries of the specified table.
*/
static void ip_vs_lblc_flush(struct ip_vs_service *svc)
{
struct ip_vs_lblc_table *tbl = svc->sched_data;
struct ip_vs_lblc_entry *en;
struct hlist_node *next;
int i;
spin_lock_bh(&svc->sched_lock);
tbl->dead = true;
for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
ip_vs_lblc_del(en);
atomic_dec(&tbl->entries);
}
}
spin_unlock_bh(&svc->sched_lock);
}
static int sysctl_lblc_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
return svc->ipvs->sysctl_lblc_expiration;
#else
return DEFAULT_EXPIRATION;
#endif
}
static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
struct ip_vs_lblc_table *tbl = svc->sched_data;
struct ip_vs_lblc_entry *en;
struct hlist_node *next;
unsigned long now = jiffies;
int i, j;
for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK;
spin_lock(&svc->sched_lock);
hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
if (time_before(now,
en->lastuse +
sysctl_lblc_expiration(svc)))
continue;
ip_vs_lblc_del(en);
atomic_dec(&tbl->entries);
}
spin_unlock(&svc->sched_lock);
}
tbl->rover = j;
}
/*
* Periodical timer handler for IPVS lblc table
* It is used to collect stale entries when the number of entries
* exceeds the maximum size of the table.
*
 * Fixme: we probably need a more complicated algorithm to collect
* entries that have not been used for a long time even
* if the number of entries doesn't exceed the maximum size
* of the table.
* The full expiration check is for this purpose now.
*/
static void ip_vs_lblc_check_expire(struct timer_list *t)
{
struct ip_vs_lblc_table *tbl = from_timer(tbl, t, periodic_timer);
struct ip_vs_service *svc = tbl->svc;
unsigned long now = jiffies;
int goal;
int i, j;
struct ip_vs_lblc_entry *en;
struct hlist_node *next;
if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
/* do full expiration check */
ip_vs_lblc_full_check(svc);
tbl->counter = 1;
goto out;
}
if (atomic_read(&tbl->entries) <= tbl->max_size) {
tbl->counter++;
goto out;
}
goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
if (goal > tbl->max_size/2)
goal = tbl->max_size/2;
for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK;
spin_lock(&svc->sched_lock);
hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
continue;
ip_vs_lblc_del(en);
atomic_dec(&tbl->entries);
goal--;
}
spin_unlock(&svc->sched_lock);
if (goal <= 0)
break;
}
tbl->rover = j;
out:
mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
int i;
struct ip_vs_lblc_table *tbl;
/*
* Allocate the ip_vs_lblc_table for this service
*/
tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
svc->sched_data = tbl;
IP_VS_DBG(6, "LBLC hash table (memory=%zdbytes) allocated for "
"current service\n", sizeof(*tbl));
/*
* Initialize the hash buckets
*/
for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
INIT_HLIST_HEAD(&tbl->bucket[i]);
}
tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
tbl->rover = 0;
tbl->counter = 1;
tbl->dead = false;
tbl->svc = svc;
atomic_set(&tbl->entries, 0);
/*
* Hook periodic timer for garbage collection
*/
timer_setup(&tbl->periodic_timer, ip_vs_lblc_check_expire, 0);
mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
return 0;
}
static void ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
struct ip_vs_lblc_table *tbl = svc->sched_data;
/* remove periodic timer */
timer_shutdown_sync(&tbl->periodic_timer);
/* got to clean up table entries here */
ip_vs_lblc_flush(svc);
/* release the table itself */
kfree_rcu(tbl, rcu_head);
IP_VS_DBG(6, "LBLC hash table (memory=%zdbytes) released\n",
sizeof(*tbl));
}
static inline struct ip_vs_dest *
__ip_vs_lblc_schedule(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest, *least;
int loh, doh;
/*
* We use the following formula to estimate the load:
* (dest overhead) / dest->weight
*
* Remember -- no floats in kernel mode!!!
* The comparison of h1*w2 > h2*w1 is equivalent to that of
* h1/w1 > h2/w2
* if every weight is larger than zero.
*
* The server with weight=0 is quiesced and will not receive any
 * new connections.
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
if (atomic_read(&dest->weight) > 0) {
least = dest;
loh = ip_vs_dest_conn_overhead(least);
goto nextstage;
}
}
return NULL;
/*
* Find the destination with the least load.
*/
nextstage:
list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if ((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight)) {
least = dest;
loh = doh;
}
}
IP_VS_DBG_BUF(6, "LBLC: server %s:%d "
"activeconns %d refcnt %d weight %d overhead %d\n",
IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
refcount_read(&least->refcnt),
atomic_read(&least->weight), loh);
return least;
}
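/* A minimal illustrative sketch (not part of the original file): the
 * integer cross-multiplication used in __ip_vs_lblc_schedule() compares
 * loh/w_least against doh/w_dest without floating point. With the
 * hypothetical values loh=10, w_least=2, doh=12, w_dest=3 we get
 * 10*3 = 30 > 12*2 = 24, matching 10/2 = 5.0 > 12/3 = 4.0, so "dest"
 * would replace "least" as the currently least-loaded server.
 */
static inline bool ip_vs_lblc_example_less_loaded(s64 loh, int w_least,
						  s64 doh, int w_dest)
{
	/* dest is preferred when loh/w_least > doh/w_dest */
	return loh * w_dest > doh * w_least;
}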
/*
* If this destination server is overloaded and there is a less loaded
* server, then return true.
*/
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
struct ip_vs_dest *d;
list_for_each_entry_rcu(d, &svc->destinations, n_list) {
if (atomic_read(&d->activeconns)*2
< atomic_read(&d->weight)) {
return 1;
}
}
}
return 0;
}
/*
* Locality-Based (weighted) Least-Connection scheduling
*/
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_lblc_table *tbl = svc->sched_data;
struct ip_vs_dest *dest = NULL;
struct ip_vs_lblc_entry *en;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/* First look in our cache */
en = ip_vs_lblc_get(svc->af, tbl, &iph->daddr);
if (en) {
/* We only hold a read lock, but this is atomic */
en->lastuse = jiffies;
/*
* If the destination is not available, i.e. it's in the trash,
* we must ignore it, as it may be removed from under our feet,
* if someone drops our reference count. Our caller only makes
* sure that destinations, that are not in the trash, are not
* moved to the trash, while we are scheduling. But anyone can
* free up entries from the trash at any time.
*/
dest = en->dest;
if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
goto out;
}
/* No cache entry or it is invalid, time to schedule */
dest = __ip_vs_lblc_schedule(svc);
if (!dest) {
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
}
/* If we fail to create a cache entry, we'll just use the valid dest */
spin_lock_bh(&svc->sched_lock);
if (!tbl->dead)
ip_vs_lblc_new(tbl, &iph->daddr, svc->af, dest);
spin_unlock_bh(&svc->sched_lock);
out:
IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, &iph->daddr),
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));
return dest;
}
/*
* IPVS LBLC Scheduler structure
*/
static struct ip_vs_scheduler ip_vs_lblc_scheduler = {
.name = "lblc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
.init_service = ip_vs_lblc_init_svc,
.done_service = ip_vs_lblc_done_svc,
.schedule = ip_vs_lblc_schedule,
};
/*
* per netns init.
*/
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblc_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
size_t vars_table_size = ARRAY_SIZE(vs_vars_table);
if (!ipvs)
return -ENOENT;
if (!net_eq(net, &init_net)) {
ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
sizeof(vs_vars_table),
GFP_KERNEL);
if (ipvs->lblc_ctl_table == NULL)
return -ENOMEM;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
ipvs->lblc_ctl_table[0].procname = NULL;
vars_table_size = 0;
}
} else
ipvs->lblc_ctl_table = vs_vars_table;
ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
ipvs->lblc_ctl_header = register_net_sysctl_sz(net, "net/ipv4/vs",
ipvs->lblc_ctl_table,
vars_table_size);
if (!ipvs->lblc_ctl_header) {
if (!net_eq(net, &init_net))
kfree(ipvs->lblc_ctl_table);
return -ENOMEM;
}
return 0;
}
static void __net_exit __ip_vs_lblc_exit(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
unregister_net_sysctl_table(ipvs->lblc_ctl_header);
if (!net_eq(net, &init_net))
kfree(ipvs->lblc_ctl_table);
}
#else
static int __net_init __ip_vs_lblc_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblc_exit(struct net *net) { }
#endif
static struct pernet_operations ip_vs_lblc_ops = {
.init = __ip_vs_lblc_init,
.exit = __ip_vs_lblc_exit,
};
static int __init ip_vs_lblc_init(void)
{
int ret;
ret = register_pernet_subsys(&ip_vs_lblc_ops);
if (ret)
return ret;
ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
if (ret)
unregister_pernet_subsys(&ip_vs_lblc_ops);
return ret;
}
static void __exit ip_vs_lblc_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
unregister_pernet_subsys(&ip_vs_lblc_ops);
rcu_barrier();
}
module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_lblc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_xmit.c: various packet transmitters for IPVS
*
* Authors: Wensong Zhang <[email protected]>
* Julian Anastasov <[email protected]>
*
* Changes:
*
* Description of forwarding methods:
 * - all transmitters are called from LOCAL_IN (remote clients) and
 *   LOCAL_OUT (local clients) but for ICMP they can be called from FORWARD
 * - not all connections have a destination server, for example,
 *   connections in the backup server when fwmark is used
 * - bypass connections use daddr from the packet
 * - we can use dst without a ref while sending in an RCU section; we take
 *   a ref when returning NF_ACCEPT for a NAT-ed packet via loopback
* LOCAL_OUT rules:
* - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
* - skb->pkt_type is not set yet
* - the only place where we can see skb->sk != NULL
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/tcp.h> /* for tcphdr */
#include <net/ip.h>
#include <net/gue.h>
#include <net/gre.h>
#include <net/tcp.h> /* for csum_tcpudp_magic */
#include <net/udp.h>
#include <net/icmp.h> /* for icmp_send */
#include <net/route.h> /* for ip_route_output */
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip_tunnels.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip_vs.h>
enum {
IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
* local
*/
IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */
IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */
IP_VS_RT_MODE_TUNNEL = 32,/* Tunnel mode */
};
static inline struct ip_vs_dest_dst *ip_vs_dest_dst_alloc(void)
{
return kmalloc(sizeof(struct ip_vs_dest_dst), GFP_ATOMIC);
}
static inline void ip_vs_dest_dst_free(struct ip_vs_dest_dst *dest_dst)
{
kfree(dest_dst);
}
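/* RCU callback used by __ip_vs_dst_set() below to release a cached route */
static void ip_vs_dest_dst_rcu_free(struct rcu_head *head)
{
	struct ip_vs_dest_dst *dest_dst = container_of(head,
						       struct ip_vs_dest_dst,
						       rcu_head);
	dst_release(dest_dst->dst_cache);
	kfree(dest_dst);
}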
/*
* Destination cache to speed up outgoing route lookup
*/
static inline void
__ip_vs_dst_set(struct ip_vs_dest *dest, struct ip_vs_dest_dst *dest_dst,
struct dst_entry *dst, u32 dst_cookie)
{
struct ip_vs_dest_dst *old;
old = rcu_dereference_protected(dest->dest_dst,
lockdep_is_held(&dest->dst_lock));
if (dest_dst) {
dest_dst->dst_cache = dst;
dest_dst->dst_cookie = dst_cookie;
}
rcu_assign_pointer(dest->dest_dst, dest_dst);
if (old)
call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
}
static inline struct ip_vs_dest_dst *
__ip_vs_dst_check(struct ip_vs_dest *dest)
{
struct ip_vs_dest_dst *dest_dst = rcu_dereference(dest->dest_dst);
struct dst_entry *dst;
if (!dest_dst)
return NULL;
dst = dest_dst->dst_cache;
if (dst->obsolete &&
dst->ops->check(dst, dest_dst->dst_cookie) == NULL)
return NULL;
return dest_dst;
}
static inline bool
__mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
{
if (IP6CB(skb)->frag_max_size) {
		/* frag_max_size tells us that this packet has been
		 * defragmented by the netfilter IPv6 conntrack module.
		 */
		if (IP6CB(skb)->frag_max_size > mtu)
			return true; /* largest fragment violates MTU */
	} else if (skb->len > mtu && !skb_is_gso(skb)) {
		return true; /* packet size violates MTU */
}
return false;
}
/* Get route to daddr, update *saddr, optionally bind route to saddr */
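/* In IP_VS_RT_MODE_CONNECT mode the lookup may be retried once: either
 * with *saddr cleared after an -EINVAL failure, or with the saddr chosen
 * by the first lookup so that the returned route gets bound to it (the
 * "loop" flag keeps the re-bind from repeating).
 */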
static struct rtable *do_output_route4(struct net *net, __be32 daddr,
int rt_mode, __be32 *saddr)
{
struct flowi4 fl4;
struct rtable *rt;
bool loop = false;
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
FLOWI_FLAG_KNOWN_NH : 0;
retry:
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt)) {
/* Invalid saddr ? */
if (PTR_ERR(rt) == -EINVAL && *saddr &&
rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
*saddr = 0;
flowi4_update_output(&fl4, 0, daddr, 0);
goto retry;
}
IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
return NULL;
} else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
ip_rt_put(rt);
*saddr = fl4.saddr;
flowi4_update_output(&fl4, 0, daddr, fl4.saddr);
loop = true;
goto retry;
}
*saddr = fl4.saddr;
return rt;
}
#ifdef CONFIG_IP_VS_IPV6
static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
{
return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
}
#endif
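/* Decide whether forwarding the skb to the new route would illegally
 * cross the local/non-local boundary: a local destination must be
 * allowed by IP_VS_RT_MODE_LOCAL (and, unless IP_VS_RT_MODE_RDR is set,
 * the old route must already be local), while a non-local destination
 * must be allowed by IP_VS_RT_MODE_NON_LOCAL and must not come from a
 * loopback source address.
 */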
static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
int rt_mode,
bool new_rt_is_local)
{
bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL);
bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR);
bool source_is_loopback;
bool old_rt_is_local;
#ifdef CONFIG_IP_VS_IPV6
if (skb_af == AF_INET6) {
int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
source_is_loopback =
(!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
(addr_type & IPV6_ADDR_LOOPBACK);
old_rt_is_local = __ip_vs_is_local_route6(
(struct rt6_info *)skb_dst(skb));
} else
#endif
{
source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr);
old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
}
if (unlikely(new_rt_is_local)) {
if (!rt_mode_allow_local)
return true;
if (!rt_mode_allow_redirect && !old_rt_is_local)
return true;
} else {
if (!rt_mode_allow_non_local)
return true;
if (source_is_loopback)
return true;
}
return false;
}
static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
{
struct sock *sk = skb->sk;
struct rtable *ort = skb_rtable(skb);
if (!skb->dev && sk && sk_fullsock(sk))
ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
}
static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
int rt_mode,
struct ip_vs_iphdr *ipvsh,
struct sk_buff *skb, int mtu)
{
#ifdef CONFIG_IP_VS_IPV6
if (skb_af == AF_INET6) {
struct net *net = ipvs->net;
if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
if (!skb->dev)
skb->dev = net->loopback_dev;
/* only send ICMP too big on first fragment */
if (!ipvsh->fragoffs && !ip_vs_iph_icmp(ipvsh))
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG(1, "frag needed for %pI6c\n",
&ipv6_hdr(skb)->saddr);
return false;
}
} else
#endif
{
/* If we're going to tunnel the packet and pmtu discovery
* is disabled, we'll just fragment it anyway
*/
if ((rt_mode & IP_VS_RT_MODE_TUNNEL) && !sysctl_pmtu_disc(ipvs))
return true;
if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) &&
skb->len > mtu && !skb_is_gso(skb) &&
!ip_vs_iph_icmp(ipvsh))) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
IP_VS_DBG(1, "frag needed for %pI4\n",
&ip_hdr(skb)->saddr);
return false;
}
}
return true;
}
static inline bool decrement_ttl(struct netns_ipvs *ipvs,
int skb_af,
struct sk_buff *skb)
{
struct net *net = ipvs->net;
#ifdef CONFIG_IP_VS_IPV6
if (skb_af == AF_INET6) {
struct dst_entry *dst = skb_dst(skb);
/* check and decrement ttl */
if (ipv6_hdr(skb)->hop_limit <= 1) {
struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED,
ICMPV6_EXC_HOPLIMIT, 0);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
return false;
}
/* don't propagate ttl change to cloned packets */
if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
return false;
ipv6_hdr(skb)->hop_limit--;
} else
#endif
{
if (ip_hdr(skb)->ttl <= 1) {
/* Tell the sender its packet died... */
__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
return false;
}
/* don't propagate ttl change to cloned packets */
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
return false;
/* Decrease ttl */
ip_decrease_ttl(ip_hdr(skb));
}
return true;
}
/* Get route to destination or remote server */
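/* Returns 1 if the destination is local (the caller delivers to the local
 * stack and the old route is preserved), 0 if a new output route was
 * attached to the skb, or -1 on error. When a dest is supplied its cached
 * route is reused without taking a reference (noref), relying on the
 * surrounding RCU read-side section.
 */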
static int
__ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
struct ip_vs_dest *dest,
__be32 daddr, int rt_mode, __be32 *ret_saddr,
struct ip_vs_iphdr *ipvsh)
{
struct net *net = ipvs->net;
struct ip_vs_dest_dst *dest_dst;
struct rtable *rt; /* Route to the other host */
int mtu;
int local, noref = 1;
if (dest) {
dest_dst = __ip_vs_dst_check(dest);
if (likely(dest_dst))
rt = (struct rtable *) dest_dst->dst_cache;
else {
dest_dst = ip_vs_dest_dst_alloc();
spin_lock_bh(&dest->dst_lock);
if (!dest_dst) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
goto err_unreach;
}
rt = do_output_route4(net, dest->addr.ip, rt_mode,
&dest_dst->dst_saddr.ip);
if (!rt) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
ip_vs_dest_dst_free(dest_dst);
goto err_unreach;
}
__ip_vs_dst_set(dest, dest_dst, &rt->dst, 0);
spin_unlock_bh(&dest->dst_lock);
IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n",
&dest->addr.ip, &dest_dst->dst_saddr.ip,
rcuref_read(&rt->dst.__rcuref));
}
if (ret_saddr)
*ret_saddr = dest_dst->dst_saddr.ip;
} else {
__be32 saddr = htonl(INADDR_ANY);
noref = 0;
/* For such unconfigured boxes avoid many route lookups
* for performance reasons because we do not remember saddr
*/
rt_mode &= ~IP_VS_RT_MODE_CONNECT;
rt = do_output_route4(net, daddr, rt_mode, &saddr);
if (!rt)
goto err_unreach;
if (ret_saddr)
*ret_saddr = saddr;
}
local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
local))) {
IP_VS_DBG_RL("We are crossing local and non-local addresses"
" daddr=%pI4\n", &daddr);
goto err_put;
}
if (unlikely(local)) {
/* skb to local stack, preserve old route */
if (!noref)
ip_rt_put(rt);
return local;
}
if (!decrement_ttl(ipvs, skb_af, skb))
goto err_put;
if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL))) {
mtu = dst_mtu(&rt->dst);
} else {
mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (!dest)
goto err_put;
if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
mtu -= sizeof(struct udphdr) + sizeof(struct guehdr);
if ((dest->tun_flags &
IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL)
mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
} else if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
__be16 tflags = 0;
if (dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
mtu -= gre_calc_hlen(tflags);
}
if (mtu < 68) {
IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
goto err_put;
}
maybe_update_pmtu(skb_af, skb, mtu);
}
if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
goto err_put;
skb_dst_drop(skb);
if (noref)
skb_dst_set_noref(skb, &rt->dst);
else
skb_dst_set(skb, &rt->dst);
return local;
err_put:
if (!noref)
ip_rt_put(rt);
return -1;
err_unreach:
dst_link_failure(skb);
return -1;
}
#ifdef CONFIG_IP_VS_IPV6
static struct dst_entry *
__ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
struct in6_addr *ret_saddr, int do_xfrm, int rt_mode)
{
struct dst_entry *dst;
struct flowi6 fl6 = {
.daddr = *daddr,
};
if (rt_mode & IP_VS_RT_MODE_KNOWN_NH)
fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error)
goto out_err;
if (!ret_saddr)
return dst;
if (ipv6_addr_any(&fl6.saddr) &&
ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
&fl6.daddr, 0, &fl6.saddr) < 0)
goto out_err;
if (do_xfrm) {
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
if (IS_ERR(dst)) {
dst = NULL;
goto out_err;
}
}
*ret_saddr = fl6.saddr;
return dst;
out_err:
dst_release(dst);
IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
return NULL;
}
/*
* Get route to destination or remote server
*/
static int
__ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
struct ip_vs_dest *dest,
struct in6_addr *daddr, struct in6_addr *ret_saddr,
struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
{
struct net *net = ipvs->net;
struct ip_vs_dest_dst *dest_dst;
struct rt6_info *rt; /* Route to the other host */
struct dst_entry *dst;
int mtu;
int local, noref = 1;
if (dest) {
dest_dst = __ip_vs_dst_check(dest);
if (likely(dest_dst))
rt = (struct rt6_info *) dest_dst->dst_cache;
else {
u32 cookie;
dest_dst = ip_vs_dest_dst_alloc();
spin_lock_bh(&dest->dst_lock);
if (!dest_dst) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
goto err_unreach;
}
dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
&dest_dst->dst_saddr.in6,
do_xfrm, rt_mode);
if (!dst) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
ip_vs_dest_dst_free(dest_dst);
goto err_unreach;
}
rt = (struct rt6_info *) dst;
cookie = rt6_get_cookie(rt);
__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
spin_unlock_bh(&dest->dst_lock);
IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
&dest->addr.in6, &dest_dst->dst_saddr.in6,
rcuref_read(&rt->dst.__rcuref));
}
if (ret_saddr)
*ret_saddr = dest_dst->dst_saddr.in6;
} else {
noref = 0;
dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm,
rt_mode);
if (!dst)
goto err_unreach;
rt = (struct rt6_info *) dst;
}
local = __ip_vs_is_local_route6(rt);
if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
local))) {
IP_VS_DBG_RL("We are crossing local and non-local addresses"
" daddr=%pI6\n", daddr);
goto err_put;
}
if (unlikely(local)) {
/* skb to local stack, preserve old route */
if (!noref)
dst_release(&rt->dst);
return local;
}
if (!decrement_ttl(ipvs, skb_af, skb))
goto err_put;
/* MTU checking */
if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL)))
mtu = dst_mtu(&rt->dst);
else {
mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
if (!dest)
goto err_put;
if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
mtu -= sizeof(struct udphdr) + sizeof(struct guehdr);
if ((dest->tun_flags &
IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL)
mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
} else if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
__be16 tflags = 0;
if (dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
mtu -= gre_calc_hlen(tflags);
}
if (mtu < IPV6_MIN_MTU) {
IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
IPV6_MIN_MTU);
goto err_put;
}
maybe_update_pmtu(skb_af, skb, mtu);
}
if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
goto err_put;
skb_dst_drop(skb);
if (noref)
skb_dst_set_noref(skb, &rt->dst);
else
skb_dst_set(skb, &rt->dst);
return local;
err_put:
if (!noref)
dst_release(&rt->dst);
return -1;
err_unreach:
	/* The ip6_link_failure function requires the dev field to be set
	 * in order to get the net (which is further needed for fwmark
	 * reflection).
	 */
if (!skb->dev)
skb->dev = skb_dst(skb)->dev;
dst_link_failure(skb);
return -1;
}
#endif
/* return NF_ACCEPT to allow forwarding or other NF_xxx on error */
static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
struct ip_vs_conn *cp)
{
int ret = NF_ACCEPT;
skb->ipvs_property = 1;
if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
ret = ip_vs_confirm_conntrack(skb);
if (ret == NF_ACCEPT) {
nf_reset_ct(skb);
skb_forward_csum(skb);
if (skb->dev)
skb_clear_tstamp(skb);
}
return ret;
}
/* In the event of a remote destination, it's possible that we would have
* matches against an old socket (particularly a TIME-WAIT socket). This
 * causes havoc down the line (ip_local_out et al. expect regular sockets
* and invalid memory accesses will happen) so simply drop the association
* in this case.
*/
static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
{
/* If dev is set, the packet came from the LOCAL_IN callback and
* not from a local TCP socket.
*/
if (skb->dev)
skb_orphan(skb);
}
/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
struct ip_vs_conn *cp, int local)
{
int ret = NF_STOLEN;
skb->ipvs_property = 1;
if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 1);
/* Remove the early_demux association unless it's bound for the
* exact same port and address on this host after translation.
*/
if (!local || cp->vport != cp->dport ||
!ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
ip_vs_drop_early_demux_sk(skb);
if (!local) {
skb_forward_csum(skb);
if (skb->dev)
skb_clear_tstamp(skb);
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
ret = NF_ACCEPT;
return ret;
}
/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
struct ip_vs_conn *cp, int local)
{
int ret = NF_STOLEN;
skb->ipvs_property = 1;
if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
ip_vs_notrack(skb);
if (!local) {
ip_vs_drop_early_demux_sk(skb);
skb_forward_csum(skb);
if (skb->dev)
skb_clear_tstamp(skb);
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
ret = NF_ACCEPT;
return ret;
}
/*
* NULL transmitter (do nothing except return NF_ACCEPT)
*/
int
ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
/* we do not touch skb and do not need pskb ptr */
return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
}
/*
* Bypass transmitter
 * Let packets bypass the destination when the destination is not
 * available; it may only be used in a transparent cache cluster.
*/
int
ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct iphdr *iph = ip_hdr(skb);
if (__ip_vs_get_out_rt(cp->ipvs, cp->af, skb, NULL, iph->daddr,
IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0)
goto tx_error;
ip_send_check(iph);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct ipv6hdr *iph = ipv6_hdr(skb);
if (__ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, NULL,
&iph->daddr, NULL,
ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0)
goto tx_error;
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
return NF_STOLEN;
}
#endif
/*
* NAT transmitter (only for outside-to-inside nat forwarding)
* Not used for related ICMP
*/
int
ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct rtable *rt; /* Route to the other host */
int local, rc, was_input;
/* check if it is a connection of no-client-port */
if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
__be16 _pt, *p;
p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
if (p == NULL)
goto tx_error;
ip_vs_conn_fill_cport(cp, *p);
IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
}
was_input = rt_is_input_route(skb_rtable(skb));
local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR, NULL, ipvsh);
if (local < 0)
goto tx_error;
rt = skb_rtable(skb);
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct) {
IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, ipvsh->off,
"ip_vs_nat_xmit(): "
"stopping DNAT to local address");
goto tx_error;
}
}
#endif
/* From world but DNAT to loopback address? */
if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, ipvsh->off,
"ip_vs_nat_xmit(): stopping DNAT to loopback "
"address");
goto tx_error;
}
/* copy-on-write the packet before mangling it */
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
goto tx_error;
if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error;
/* mangle the packet */
if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
goto tx_error;
ip_hdr(skb)->daddr = cp->daddr.ip;
ip_send_check(ip_hdr(skb));
IP_VS_DBG_PKT(10, AF_INET, pp, skb, ipvsh->off, "After DNAT");
	/* FIXME: when the application helper enlarges the packet and the
	   length is larger than the MTU of the outgoing device, there will
	   still be an MTU problem. */
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
return rc;
tx_error:
kfree_skb(skb);
return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct rt6_info *rt; /* Route to the other host */
int local, rc;
/* check if it is a connection of no-client-port */
if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !ipvsh->fragoffs)) {
__be16 _pt, *p;
p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
if (p == NULL)
goto tx_error;
ip_vs_conn_fill_cport(cp, *p);
IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
}
local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
&cp->daddr.in6,
NULL, ipvsh, 0,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR);
if (local < 0)
goto tx_error;
rt = (struct rt6_info *) skb_dst(skb);
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct) {
IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, ipvsh->off,
"ip_vs_nat_xmit_v6(): "
"stopping DNAT to local address");
goto tx_error;
}
}
#endif
/* From world but DNAT to loopback address? */
if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, ipvsh->off,
"ip_vs_nat_xmit_v6(): "
"stopping DNAT to loopback address");
goto tx_error;
}
/* copy-on-write the packet before mangling it */
if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
goto tx_error;
if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error;
/* mangle the packet */
if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
goto tx_error;
ipv6_hdr(skb)->daddr = cp->daddr.in6;
IP_VS_DBG_PKT(10, AF_INET6, pp, skb, ipvsh->off, "After DNAT");
	/* FIXME: when the application helper enlarges the packet and the
	   length is larger than the MTU of the outgoing device, there will
	   still be an MTU problem. */
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
return rc;
tx_error:
kfree_skb(skb);
return NF_STOLEN;
}
#endif
/* When forwarding a packet, we must ensure that we've got enough headroom
* for the encapsulation packet in the skb. This also gives us an
* opportunity to figure out what the payload_len, dsfield, ttl, and df
* values should be, so that we won't need to look at the old ip header
* again
*/
static struct sk_buff *
ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
unsigned int max_headroom, __u8 *next_protocol,
__u32 *payload_len, __u8 *dsfield, __u8 *ttl,
__be16 *df)
{
struct sk_buff *new_skb = NULL;
struct iphdr *old_iph = NULL;
__u8 old_dsfield;
#ifdef CONFIG_IP_VS_IPV6
struct ipv6hdr *old_ipv6h = NULL;
#endif
ip_vs_drop_early_demux_sk(skb);
if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb)
goto error;
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
consume_skb(skb);
skb = new_skb;
}
#ifdef CONFIG_IP_VS_IPV6
if (skb_af == AF_INET6) {
old_ipv6h = ipv6_hdr(skb);
*next_protocol = IPPROTO_IPV6;
if (payload_len)
*payload_len =
ntohs(old_ipv6h->payload_len) +
sizeof(*old_ipv6h);
old_dsfield = ipv6_get_dsfield(old_ipv6h);
*ttl = old_ipv6h->hop_limit;
if (df)
*df = 0;
} else
#endif
{
old_iph = ip_hdr(skb);
/* Copy DF, reset fragment offset and MF */
if (df)
*df = (old_iph->frag_off & htons(IP_DF));
*next_protocol = IPPROTO_IPIP;
/* fix old IP header checksum */
ip_send_check(old_iph);
old_dsfield = ipv4_get_dsfield(old_iph);
*ttl = old_iph->ttl;
if (payload_len)
*payload_len = skb_ip_totlen(skb);
}
/* Implement full-functionality option for ECN encapsulation */
*dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
return skb;
error:
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
static inline int __tun_gso_type_mask(int encaps_af, int orig_af)
{
switch (encaps_af) {
case AF_INET:
return SKB_GSO_IPXIP4;
case AF_INET6:
return SKB_GSO_IPXIP6;
default:
return 0;
}
}
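/* Push a GUE header (plus an optional private remote-checksum option)
 * and a UDP header in front of the payload, so that after the caller
 * prepends the outer IP header the packet reads
 * outer IP | UDP | GUE | inner packet. *next_protocol is rewritten to
 * IPPROTO_UDP for that outer header.
 */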
static int
ipvs_gue_encap(struct net *net, struct sk_buff *skb,
struct ip_vs_conn *cp, __u8 *next_protocol)
{
__be16 dport;
__be16 sport = udp_flow_src_port(net, skb, 0, 0, false);
struct udphdr *udph; /* Our new UDP header */
struct guehdr *gueh; /* Our new GUE header */
size_t hdrlen, optlen = 0;
void *data;
bool need_priv = false;
if ((cp->dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
optlen += GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
need_priv = true;
}
hdrlen = sizeof(struct guehdr) + optlen;
skb_push(skb, hdrlen);
gueh = (struct guehdr *)skb->data;
gueh->control = 0;
gueh->version = 0;
gueh->hlen = optlen >> 2;
gueh->flags = 0;
gueh->proto_ctype = *next_protocol;
data = &gueh[1];
if (need_priv) {
__be32 *flags = data;
u16 csum_start = skb_checksum_start_offset(skb);
__be16 *pd;
gueh->flags |= GUE_FLAG_PRIV;
*flags = 0;
data += GUE_LEN_PRIV;
if (csum_start < hdrlen)
return -EINVAL;
csum_start -= hdrlen;
pd = data;
pd[0] = htons(csum_start);
pd[1] = htons(csum_start + skb->csum_offset);
if (!skb_is_gso(skb)) {
skb->ip_summed = CHECKSUM_NONE;
skb->encapsulation = 0;
}
*flags |= GUE_PFLAG_REMCSUM;
data += GUE_PLEN_REMCSUM;
}
skb_push(skb, sizeof(struct udphdr));
skb_reset_transport_header(skb);
udph = udp_hdr(skb);
dport = cp->dest->tun_port;
udph->dest = dport;
udph->source = sport;
udph->len = htons(skb->len);
udph->check = 0;
*next_protocol = IPPROTO_UDP;
return 0;
}
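/* Push a GRE base header (with an optional checksum field) in front of
 * the payload and rewrite *next_protocol to IPPROTO_GRE; the caller
 * still prepends the outer IP header.
 */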
static void
ipvs_gre_encap(struct net *net, struct sk_buff *skb,
struct ip_vs_conn *cp, __u8 *next_protocol)
{
__be16 proto = *next_protocol == IPPROTO_IPIP ?
htons(ETH_P_IP) : htons(ETH_P_IPV6);
__be16 tflags = 0;
size_t hdrlen;
if (cp->dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
hdrlen = gre_calc_hlen(tflags);
gre_build_header(skb, hdrlen, tflags, proto, 0, 0);
*next_protocol = IPPROTO_GRE;
}
/*
* IP Tunneling transmitter
*
* This function encapsulates the packet in a new IP packet, its
* destination will be set to cp->daddr. Most code of this function
* is taken from ipip.c.
*
* It is used in VS/TUN cluster. The load balancer selects a real
* server from a cluster based on a scheduling algorithm,
* encapsulates the request packet and forwards it to the selected
* server. For example, all real servers are configured with
* "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 * the encapsulated packet, it will decapsulate the packet, process
 * the request and return the response packets directly to the client
 * without passing through the load balancer. This can greatly increase
 * the scalability of the virtual server.
*
* Used for ANY protocol
*/
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct netns_ipvs *ipvs = cp->ipvs;
struct net *net = ipvs->net;
struct rtable *rt; /* Route to the other host */
__be32 saddr; /* Source for tunnel */
struct net_device *tdev; /* Device to other host */
__u8 next_protocol = 0;
__u8 dsfield = 0;
__u8 ttl = 0;
__be16 df = 0;
__be16 *dfp = NULL;
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
int ret, local;
int tun_type, gso_type;
int tun_flags;
local = __ip_vs_get_out_rt(ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_CONNECT |
IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh);
if (local < 0)
goto tx_error;
if (local)
return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
rt = skb_rtable(skb);
tdev = rt->dst.dev;
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
tun_type = cp->dest->tun_type;
tun_flags = cp->dest->tun_flags;
if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
size_t gue_hdrlen, gue_optlen = 0;
if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
gue_optlen += GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
}
gue_hdrlen = sizeof(struct guehdr) + gue_optlen;
max_headroom += sizeof(struct udphdr) + gue_hdrlen;
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
size_t gre_hdrlen;
__be16 tflags = 0;
if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
gre_hdrlen = gre_calc_hlen(tflags);
max_headroom += gre_hdrlen;
}
/* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
&next_protocol, NULL, &dsfield,
&ttl, dfp);
if (IS_ERR(skb))
return NF_STOLEN;
gso_type = __tun_gso_type_mask(AF_INET, cp->af);
if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) ||
(tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM))
gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
else
gso_type |= SKB_GSO_UDP_TUNNEL;
if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
gso_type |= SKB_GSO_TUNNEL_REMCSUM;
}
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
gso_type |= SKB_GSO_GRE_CSUM;
else
gso_type |= SKB_GSO_GRE;
}
if (iptunnel_handle_offloads(skb, gso_type))
goto tx_error;
skb->transport_header = skb->network_header;
skb_set_inner_ipproto(skb, next_protocol);
skb_set_inner_mac_header(skb, skb_inner_network_offset(skb));
if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
bool check = false;
if (ipvs_gue_encap(net, skb, cp, &next_protocol))
goto tx_error;
if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) ||
(tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM))
check = true;
udp_set_csum(!check, skb, saddr, cp->daddr.ip, skb->len);
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE)
ipvs_gre_encap(net, skb, cp, &next_protocol);
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
/*
* Push down and install the IPIP header.
*/
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = sizeof(struct iphdr)>>2;
iph->frag_off = df;
iph->protocol = next_protocol;
iph->tos = dsfield;
iph->daddr = cp->daddr.ip;
iph->saddr = saddr;
iph->ttl = ttl;
ip_select_ident(net, skb, NULL);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ret = ip_vs_tunnel_xmit_prepare(skb, cp);
if (ret == NF_ACCEPT)
ip_local_out(net, skb->sk, skb);
else if (ret == NF_DROP)
kfree_skb(skb);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct netns_ipvs *ipvs = cp->ipvs;
struct net *net = ipvs->net;
struct rt6_info *rt; /* Route to the other host */
struct in6_addr saddr; /* Source for tunnel */
struct net_device *tdev; /* Device to other host */
__u8 next_protocol = 0;
__u32 payload_len = 0;
__u8 dsfield = 0;
__u8 ttl = 0;
struct ipv6hdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
int ret, local;
int tun_type, gso_type;
int tun_flags;
local = __ip_vs_get_out_rt_v6(ipvs, cp->af, skb, cp->dest,
&cp->daddr.in6,
&saddr, ipvsh, 1,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_TUNNEL);
if (local < 0)
goto tx_error;
if (local)
return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
rt = (struct rt6_info *) skb_dst(skb);
tdev = rt->dst.dev;
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
tun_type = cp->dest->tun_type;
tun_flags = cp->dest->tun_flags;
if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
size_t gue_hdrlen, gue_optlen = 0;
if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
gue_optlen += GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
}
gue_hdrlen = sizeof(struct guehdr) + gue_optlen;
max_headroom += sizeof(struct udphdr) + gue_hdrlen;
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
size_t gre_hdrlen;
__be16 tflags = 0;
if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
gre_hdrlen = gre_calc_hlen(tflags);
max_headroom += gre_hdrlen;
}
skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
&next_protocol, &payload_len,
&dsfield, &ttl, NULL);
if (IS_ERR(skb))
return NF_STOLEN;
gso_type = __tun_gso_type_mask(AF_INET6, cp->af);
if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) ||
(tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM))
gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
else
gso_type |= SKB_GSO_UDP_TUNNEL;
if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
gso_type |= SKB_GSO_TUNNEL_REMCSUM;
}
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
gso_type |= SKB_GSO_GRE_CSUM;
else
gso_type |= SKB_GSO_GRE;
}
if (iptunnel_handle_offloads(skb, gso_type))
goto tx_error;
skb->transport_header = skb->network_header;
skb_set_inner_ipproto(skb, next_protocol);
skb_set_inner_mac_header(skb, skb_inner_network_offset(skb));
if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
bool check = false;
if (ipvs_gue_encap(net, skb, cp, &next_protocol))
goto tx_error;
if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) ||
(tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM))
check = true;
udp6_set_csum(!check, skb, &saddr, &cp->daddr.in6, skb->len);
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE)
ipvs_gre_encap(net, skb, cp, &next_protocol);
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
/*
* Push down and install the IPIP header.
*/
iph = ipv6_hdr(skb);
iph->version = 6;
iph->nexthdr = next_protocol;
iph->payload_len = htons(payload_len);
memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
ipv6_change_dsfield(iph, 0, dsfield);
iph->daddr = cp->daddr.in6;
iph->saddr = saddr;
iph->hop_limit = ttl;
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ret = ip_vs_tunnel_xmit_prepare(skb, cp);
if (ret == NF_ACCEPT)
ip6_local_out(net, skb->sk, skb);
else if (ret == NF_DROP)
kfree_skb(skb);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
return NF_STOLEN;
}
#endif
/*
* Direct Routing transmitter
* Used for ANY protocol
*/
int
ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
int local;
local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_KNOWN_NH, NULL, ipvsh);
if (local < 0)
goto tx_error;
if (local)
return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
ip_send_check(ip_hdr(skb));
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
int local;
local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
&cp->daddr.in6,
NULL, ipvsh, 0,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_KNOWN_NH);
if (local < 0)
goto tx_error;
if (local)
return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
return NF_STOLEN;
}
#endif
/*
* ICMP packet transmitter
* called by the ip_vs_in_icmp
*/
int
ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
struct ip_vs_iphdr *iph)
{
struct rtable *rt; /* Route to the other host */
int rc;
int local;
int rt_mode, was_input;
/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
forwarded directly here, because there is no need to
translate address/port back */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
if (cp->packet_xmit)
rc = cp->packet_xmit(skb, cp, pp, iph);
else
rc = NF_ACCEPT;
/* do not touch skb anymore */
atomic_inc(&cp->in_pkts);
return rc;
}
/*
* mangle and send the packet here (only for VS/NAT)
*/
was_input = rt_is_input_route(skb_rtable(skb));
/* LOCALNODE from FORWARD hook is not supported */
rt_mode = (hooknum != NF_INET_FORWARD) ?
IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
NULL, iph);
if (local < 0)
goto tx_error;
rt = skb_rtable(skb);
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct) {
IP_VS_DBG(10, "%s(): "
"stopping DNAT to local address %pI4\n",
__func__, &cp->daddr.ip);
goto tx_error;
}
}
#endif
/* From world but DNAT to loopback address? */
if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
IP_VS_DBG(1, "%s(): "
"stopping DNAT to loopback %pI4\n",
__func__, &cp->daddr.ip);
goto tx_error;
}
/* copy-on-write the packet before mangling it */
if (skb_ensure_writable(skb, offset))
goto tx_error;
if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error;
ip_vs_nat_icmp(skb, pp, cp, 0);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
return ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
tx_error:
kfree_skb(skb);
rc = NF_STOLEN;
return rc;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
struct ip_vs_iphdr *ipvsh)
{
struct rt6_info *rt; /* Route to the other host */
int rc;
int local;
int rt_mode;
/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
forwarded directly here, because there is no need to
translate address/port back */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
if (cp->packet_xmit)
rc = cp->packet_xmit(skb, cp, pp, ipvsh);
else
rc = NF_ACCEPT;
/* do not touch skb anymore */
atomic_inc(&cp->in_pkts);
return rc;
}
/*
* mangle and send the packet here (only for VS/NAT)
*/
/* LOCALNODE from FORWARD hook is not supported */
rt_mode = (hooknum != NF_INET_FORWARD) ?
IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
&cp->daddr.in6, NULL, ipvsh, 0, rt_mode);
if (local < 0)
goto tx_error;
rt = (struct rt6_info *) skb_dst(skb);
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct) {
IP_VS_DBG(10, "%s(): "
"stopping DNAT to local address %pI6\n",
__func__, &cp->daddr.in6);
goto tx_error;
}
}
#endif
/* From world but DNAT to loopback address? */
if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
IP_VS_DBG(1, "%s(): "
"stopping DNAT to loopback %pI6\n",
__func__, &cp->daddr.in6);
goto tx_error;
}
/* copy-on-write the packet before mangling it */
if (skb_ensure_writable(skb, offset))
goto tx_error;
if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error;
ip_vs_nat_icmp_v6(skb, pp, cp, 0);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
return ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
tx_error:
kfree_skb(skb);
rc = NF_STOLEN;
return rc;
}
#endif
| linux-master | net/netfilter/ipvs/ip_vs_xmit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS: Shortest Expected Delay scheduling module
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes:
*/
/*
* The SED algorithm attempts to minimize each job's expected delay until
* completion. The expected delay that the job will experience is
* (Ci + 1) / Ui if sent to the ith server, in which Ci is the number of
* jobs on the ith server and Ui is the fixed service rate (weight) of
 * the ith server. The SED algorithm adopts a greedy policy in which each
 * job does what is in its own best interest, i.e. it joins the queue that
 * would minimize its expected delay of completion.
*
* See the following paper for more information:
* A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing
* in large heterogeneous systems. In Proceedings IEEE INFOCOM'88,
* pages 986-994, 1988.
*
* Thanks must go to Marko Buuri <[email protected]> for talking SED to me.
*
* The difference between SED and WLC is that SED includes the incoming
* job in the cost function (the increment of 1). SED may outperform
 * WLC when scheduling big jobs on large heterogeneous systems
 * (where the server weights vary a lot).
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
static inline int
ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
{
/*
* We only use the active connection number in the cost
* calculation here.
*/
return atomic_read(&dest->activeconns) + 1;
}
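/* A worked example of the (Ci + 1) / Ui cost described above, using
 * illustrative values only: a server with Ci=3 active connections and
 * weight Ui=2 has expected delay (3+1)/2 = 2, while one with Cj=2 and
 * Uj=1 has (2+1)/1 = 3, so the first server is preferred even though it
 * currently holds more connections. The scheduler below compares these
 * ratios by cross-multiplication to stay in integer arithmetic.
 */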
/*
 * Shortest Expected Delay scheduling
 */
static struct ip_vs_dest *
ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *least;
int loh, doh;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/*
* We calculate the load of each dest server as follows:
* (server expected overhead) / dest->weight
*
* Remember -- no floats in kernel mode!!!
* The comparison of h1*w2 > h2*w1 is equivalent to that of
* h1/w1 > h2/w2
* if every weight is larger than zero.
*
* The server with weight=0 is quiesced and will not receive any
* new connections.
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
atomic_read(&dest->weight) > 0) {
least = dest;
loh = ip_vs_sed_dest_overhead(least);
goto nextstage;
}
}
ip_vs_scheduler_err(svc, "no destination available");
return NULL;
/*
* Find the destination with the least load.
*/
nextstage:
list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
doh = ip_vs_sed_dest_overhead(dest);
if ((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight)) {
least = dest;
loh = doh;
}
}
IP_VS_DBG_BUF(6, "SED: server %s:%u "
"activeconns %d refcnt %d weight %d overhead %d\n",
IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
refcount_read(&least->refcnt),
atomic_read(&least->weight), loh);
return least;
}
static struct ip_vs_scheduler ip_vs_sed_scheduler =
{
.name = "sed",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
.schedule = ip_vs_sed_schedule,
};
static int __init ip_vs_sed_init(void)
{
return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
}
static void __exit ip_vs_sed_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_sed_scheduler);
synchronize_rcu();
}
module_init(ip_vs_sed_init);
module_exit(ip_vs_sed_cleanup);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_sed.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the NetFilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <[email protected]>
* Peter Kese <[email protected]>
* Julian Anastasov <[email protected]>
*
* Changes:
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <linux/nsproxy.h>
#include <net/ip.h>
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
#include <net/route.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include <linux/uaccess.h>
#include <net/ip_vs.h>
MODULE_ALIAS_GENL_FAMILY(IPVS_GENL_NAME);
DEFINE_MUTEX(__ip_vs_mutex); /* Serialize configuration with sockopt/netlink */
/* sysctl variables */
#ifdef CONFIG_IP_VS_DEBUG
static int sysctl_ip_vs_debug_level = 0;
int ip_vs_get_debug_level(void)
{
return sysctl_ip_vs_debug_level;
}
#endif
/* Protos */
static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup);
#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
static bool __ip_vs_addr_is_local_v6(struct net *net,
const struct in6_addr *addr)
{
struct flowi6 fl6 = {
.daddr = *addr,
};
struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
bool is_local;
is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
dst_release(dst);
return is_local;
}
#endif
#ifdef CONFIG_SYSCTL
/*
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
*/
static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
int availmem;
int nomem;
int to_change = -1;
/* we only count free and buffered memory (in pages) */
si_meminfo(&i);
availmem = i.freeram + i.bufferram;
	/* however, in Linux 2.5 i.bufferram is the total page cache size,
	   so we need to adjust it */
/* si_swapinfo(&i); */
/* availmem = availmem - (i.totalswap - i.freeswap); */
nomem = (availmem < ipvs->sysctl_amemthresh);
local_bh_disable();
/* drop_entry */
spin_lock(&ipvs->dropentry_lock);
switch (ipvs->sysctl_drop_entry) {
case 0:
atomic_set(&ipvs->dropentry, 0);
break;
case 1:
if (nomem) {
atomic_set(&ipvs->dropentry, 1);
ipvs->sysctl_drop_entry = 2;
} else {
atomic_set(&ipvs->dropentry, 0);
}
break;
case 2:
if (nomem) {
atomic_set(&ipvs->dropentry, 1);
} else {
atomic_set(&ipvs->dropentry, 0);
ipvs->sysctl_drop_entry = 1;
}
break;
case 3:
atomic_set(&ipvs->dropentry, 1);
break;
}
spin_unlock(&ipvs->dropentry_lock);
/* drop_packet */
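	/* Example with illustrative numbers: amemthresh=1024 pages and
	 * availmem=768 pages give drop_rate = 1024/(1024-768) = 4, i.e.
	 * roughly one of every four packets handled by the drop_packet
	 * defense is dropped while the memory shortage persists.
	 */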
spin_lock(&ipvs->droppacket_lock);
switch (ipvs->sysctl_drop_packet) {
case 0:
ipvs->drop_rate = 0;
break;
case 1:
if (nomem) {
ipvs->drop_rate = ipvs->drop_counter
= ipvs->sysctl_amemthresh /
(ipvs->sysctl_amemthresh-availmem);
ipvs->sysctl_drop_packet = 2;
} else {
ipvs->drop_rate = 0;
}
break;
case 2:
if (nomem) {
ipvs->drop_rate = ipvs->drop_counter
= ipvs->sysctl_amemthresh /
(ipvs->sysctl_amemthresh-availmem);
} else {
ipvs->drop_rate = 0;
ipvs->sysctl_drop_packet = 1;
}
break;
case 3:
ipvs->drop_rate = ipvs->sysctl_am_droprate;
break;
}
spin_unlock(&ipvs->droppacket_lock);
/* secure_tcp */
spin_lock(&ipvs->securetcp_lock);
switch (ipvs->sysctl_secure_tcp) {
case 0:
if (ipvs->old_secure_tcp >= 2)
to_change = 0;
break;
case 1:
if (nomem) {
if (ipvs->old_secure_tcp < 2)
to_change = 1;
ipvs->sysctl_secure_tcp = 2;
} else {
if (ipvs->old_secure_tcp >= 2)
to_change = 0;
}
break;
case 2:
if (nomem) {
if (ipvs->old_secure_tcp < 2)
to_change = 1;
} else {
if (ipvs->old_secure_tcp >= 2)
to_change = 0;
ipvs->sysctl_secure_tcp = 1;
}
break;
case 3:
if (ipvs->old_secure_tcp < 2)
to_change = 1;
break;
}
ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
if (to_change >= 0)
ip_vs_protocol_timeout_change(ipvs,
ipvs->sysctl_secure_tcp > 1);
spin_unlock(&ipvs->securetcp_lock);
local_bh_enable();
}
/* Handler for delayed work for expiring no-destination
 * connections
 */
static void expire_nodest_conn_handler(struct work_struct *work)
{
struct netns_ipvs *ipvs;
ipvs = container_of(work, struct netns_ipvs,
expire_nodest_conn_work.work);
ip_vs_expire_nodest_conn_flush(ipvs);
}
/*
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
static void defense_work_handler(struct work_struct *work)
{
struct netns_ipvs *ipvs =
container_of(work, struct netns_ipvs, defense_work.work);
update_defense_level(ipvs);
if (atomic_read(&ipvs->dropentry))
ip_vs_random_dropentry(ipvs);
queue_delayed_work(system_long_wq, &ipvs->defense_work,
DEFENSE_TIMER_PERIOD);
}
#endif
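/* Delayed work: restart the estimator kthreads after a configuration
 * change. Kthreads running with a stale generation id are stopped and
 * restarted; failed starts are retried later.
 */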
static void est_reload_work_handler(struct work_struct *work)
{
struct netns_ipvs *ipvs =
container_of(work, struct netns_ipvs, est_reload_work.work);
int genid_done = atomic_read(&ipvs->est_genid_done);
unsigned long delay = HZ / 10; /* repeat startups after failure */
bool repeat = false;
int genid;
int id;
mutex_lock(&ipvs->est_mutex);
genid = atomic_read(&ipvs->est_genid);
for (id = 0; id < ipvs->est_kt_count; id++) {
struct ip_vs_est_kt_data *kd = ipvs->est_kt_arr[id];
/* netns clean up started, abort delayed work */
if (!ipvs->enable)
goto unlock;
if (!kd)
continue;
		/* New config? Stop the kthread tasks */
if (genid != genid_done)
ip_vs_est_kthread_stop(kd);
if (!kd->task && !ip_vs_est_stopped(ipvs)) {
/* Do not start kthreads above 0 in calc phase */
if ((!id || !ipvs->est_calc_phase) &&
ip_vs_est_kthread_start(ipvs, kd) < 0)
repeat = true;
}
}
atomic_set(&ipvs->est_genid_done, genid);
if (repeat)
queue_delayed_work(system_long_wq, &ipvs->est_reload_work,
delay);
unlock:
mutex_unlock(&ipvs->est_mutex);
}
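/* Module reference helpers: take/drop a reference on the ip_vs module so
 * it cannot be unloaded while a caller still depends on it.
 */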
int
ip_vs_use_count_inc(void)
{
return try_module_get(THIS_MODULE);
}
void
ip_vs_use_count_dec(void)
{
module_put(THIS_MODULE);
}
/*
* Hash table: for virtual service lookups
*/
#define IP_VS_SVC_TAB_BITS 8
#define IP_VS_SVC_TAB_SIZE (1 << IP_VS_SVC_TAB_BITS)
#define IP_VS_SVC_TAB_MASK (IP_VS_SVC_TAB_SIZE - 1)
/* the service table hashed by <protocol, addr, port> */
static struct hlist_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
/* the service table hashed by fwmark */
static struct hlist_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
/*
* Returns hash value for virtual service
*/
static inline unsigned int
ip_vs_svc_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
const union nf_inet_addr *addr, __be16 port)
{
unsigned int porth = ntohs(port);
__be32 addr_fold = addr->ip;
__u32 ahash;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
ahash = ntohl(addr_fold);
ahash ^= ((size_t) ipvs >> 8);
return (proto ^ ahash ^ (porth >> IP_VS_SVC_TAB_BITS) ^ porth) &
IP_VS_SVC_TAB_MASK;
}
/*
* Returns hash value of fwmark for virtual service lookup
*/
static inline unsigned int ip_vs_svc_fwm_hashkey(struct netns_ipvs *ipvs, __u32 fwmark)
{
return (((size_t)ipvs>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
}
/*
* Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
* or in the ip_vs_svc_fwm_table by fwmark.
* Should be called with locked tables.
*/
static int ip_vs_svc_hash(struct ip_vs_service *svc)
{
unsigned int hash;
if (svc->flags & IP_VS_SVC_F_HASHED) {
pr_err("%s(): request for already hashed, called from %pS\n",
__func__, __builtin_return_address(0));
return 0;
}
if (svc->fwmark == 0) {
/*
* Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
*/
hash = ip_vs_svc_hashkey(svc->ipvs, svc->af, svc->protocol,
&svc->addr, svc->port);
hlist_add_head_rcu(&svc->s_list, &ip_vs_svc_table[hash]);
} else {
/*
* Hash it by fwmark in svc_fwm_table
*/
hash = ip_vs_svc_fwm_hashkey(svc->ipvs, svc->fwmark);
hlist_add_head_rcu(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
}
svc->flags |= IP_VS_SVC_F_HASHED;
/* increase its refcnt because it is referenced by the svc table */
atomic_inc(&svc->refcnt);
return 1;
}
/*
* Unhashes a service from svc_table / svc_fwm_table.
* Should be called with locked tables.
*/
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
{
if (!(svc->flags & IP_VS_SVC_F_HASHED)) {
pr_err("%s(): request for unhash flagged, called from %pS\n",
__func__, __builtin_return_address(0));
return 0;
}
if (svc->fwmark == 0) {
/* Remove it from the svc_table table */
hlist_del_rcu(&svc->s_list);
} else {
/* Remove it from the svc_fwm_table table */
hlist_del_rcu(&svc->f_list);
}
svc->flags &= ~IP_VS_SVC_F_HASHED;
atomic_dec(&svc->refcnt);
return 1;
}
/*
* Get service by {netns, proto,addr,port} in the service table.
*/
static inline struct ip_vs_service *
__ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
unsigned int hash;
struct ip_vs_service *svc;
/* Check for "full" addressed entries */
hash = ip_vs_svc_hashkey(ipvs, af, protocol, vaddr, vport);
hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[hash], s_list) {
if ((svc->af == af)
&& ip_vs_addr_equal(af, &svc->addr, vaddr)
&& (svc->port == vport)
&& (svc->protocol == protocol)
&& (svc->ipvs == ipvs)) {
/* HIT */
return svc;
}
}
return NULL;
}
/*
* Get service by {fwmark} in the service table.
*/
static inline struct ip_vs_service *
__ip_vs_svc_fwm_find(struct netns_ipvs *ipvs, int af, __u32 fwmark)
{
unsigned int hash;
struct ip_vs_service *svc;
/* Check for fwmark addressed entries */
hash = ip_vs_svc_fwm_hashkey(ipvs, fwmark);
hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[hash], f_list) {
if (svc->fwmark == fwmark && svc->af == af
&& (svc->ipvs == ipvs)) {
/* HIT */
return svc;
}
}
return NULL;
}
/* Find service, called under RCU lock */
struct ip_vs_service *
ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
struct ip_vs_service *svc;
/*
* Check the table hashed by fwmark first
*/
if (fwmark) {
svc = __ip_vs_svc_fwm_find(ipvs, af, fwmark);
if (svc)
goto out;
}
/*
* Check the table hashed by <protocol,addr,port>
* for "full" addressed entries
*/
svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport);
if (!svc && protocol == IPPROTO_TCP &&
atomic_read(&ipvs->ftpsvc_counter) &&
(vport == FTPDATA || !inet_port_requires_bind_service(ipvs->net, ntohs(vport)))) {
/*
		 * Check if an ftp service entry exists; the packet
		 * might belong to FTP data connections.
*/
svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, FTPPORT);
}
if (svc == NULL
&& atomic_read(&ipvs->nullsvc_counter)) {
/*
* Check if the catch-all port (port zero) exists
*/
svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, 0);
}
out:
IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n",
fwmark, ip_vs_proto_name(protocol),
IP_VS_DBG_ADDR(af, vaddr), ntohs(vport),
svc ? "hit" : "not hit");
return svc;
}
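/* Bind a destination to its virtual service: take a service reference and
 * publish the svc pointer via RCU.
 */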
static inline void
__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
atomic_inc(&svc->refcnt);
rcu_assign_pointer(dest->svc, svc);
}
static void ip_vs_service_free(struct ip_vs_service *svc)
{
ip_vs_stats_release(&svc->stats);
kfree(svc);
}
static void ip_vs_service_rcu_free(struct rcu_head *head)
{
struct ip_vs_service *svc;
svc = container_of(head, struct ip_vs_service, rcu_head);
ip_vs_service_free(svc);
}
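/* Drop a service reference; the last put frees the service after an RCU
 * grace period.
 */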
static void __ip_vs_svc_put(struct ip_vs_service *svc)
{
if (atomic_dec_and_test(&svc->refcnt)) {
IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port));
call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
}
}
/*
* Returns hash value for real service
*/
static inline unsigned int ip_vs_rs_hashkey(int af,
const union nf_inet_addr *addr,
__be16 port)
{
unsigned int porth = ntohs(port);
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth)
& IP_VS_RTAB_MASK;
}
/* Hash ip_vs_dest in rs_table by <proto,addr,port>. */
static void ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
{
unsigned int hash;
__be16 port;
if (dest->in_rs_table)
return;
switch (IP_VS_DFWD_METHOD(dest)) {
case IP_VS_CONN_F_MASQ:
port = dest->port;
break;
case IP_VS_CONN_F_TUNNEL:
switch (dest->tun_type) {
case IP_VS_CONN_F_TUNNEL_TYPE_GUE:
port = dest->tun_port;
break;
case IP_VS_CONN_F_TUNNEL_TYPE_IPIP:
case IP_VS_CONN_F_TUNNEL_TYPE_GRE:
port = 0;
break;
default:
return;
}
break;
default:
return;
}
/*
* Hash by proto,addr,port,
* which are the parameters of the real service.
*/
hash = ip_vs_rs_hashkey(dest->af, &dest->addr, port);
hlist_add_head_rcu(&dest->d_list, &ipvs->rs_table[hash]);
dest->in_rs_table = 1;
}
/* Unhash ip_vs_dest from rs_table. */
static void ip_vs_rs_unhash(struct ip_vs_dest *dest)
{
/*
* Remove it from the rs_table table.
*/
if (dest->in_rs_table) {
hlist_del_rcu(&dest->d_list);
dest->in_rs_table = 0;
}
}
/* Check if real service by <proto,addr,port> is present */
bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
const union nf_inet_addr *daddr, __be16 dport)
{
unsigned int hash;
struct ip_vs_dest *dest;
/* Check for "full" addressed entries */
hash = ip_vs_rs_hashkey(af, daddr, dport);
hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
if (dest->port == dport &&
dest->af == af &&
ip_vs_addr_equal(af, &dest->addr, daddr) &&
(dest->protocol == protocol || dest->vfwmark) &&
IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) {
/* HIT */
return true;
}
}
return false;
}
/* Find real service record by <proto,addr,port>.
* In case of multiple records with the same <proto,addr,port>, only
* the first found record is returned.
*
* To be called under RCU lock.
*/
struct ip_vs_dest *ip_vs_find_real_service(struct netns_ipvs *ipvs, int af,
__u16 protocol,
const union nf_inet_addr *daddr,
__be16 dport)
{
unsigned int hash;
struct ip_vs_dest *dest;
/* Check for "full" addressed entries */
hash = ip_vs_rs_hashkey(af, daddr, dport);
hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
if (dest->port == dport &&
dest->af == af &&
ip_vs_addr_equal(af, &dest->addr, daddr) &&
(dest->protocol == protocol || dest->vfwmark) &&
IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) {
/* HIT */
return dest;
}
}
return NULL;
}
/* Find real service record by <af,addr,tun_port>.
* In case of multiple records with the same <af,addr,tun_port>, only
* the first found record is returned.
*
* To be called under RCU lock.
*/
struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af,
const union nf_inet_addr *daddr,
__be16 tun_port)
{
struct ip_vs_dest *dest;
unsigned int hash;
/* Check for "full" addressed entries */
hash = ip_vs_rs_hashkey(af, daddr, tun_port);
hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
if (dest->tun_port == tun_port &&
dest->af == af &&
ip_vs_addr_equal(af, &dest->addr, daddr) &&
IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_TUNNEL) {
/* HIT */
return dest;
}
}
return NULL;
}
/* Lookup destination by {addr,port} in the given service
* Called under RCU lock.
*/
static struct ip_vs_dest *
ip_vs_lookup_dest(struct ip_vs_service *svc, int dest_af,
const union nf_inet_addr *daddr, __be16 dport)
{
struct ip_vs_dest *dest;
/*
* Find the destination for the given service
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
if ((dest->af == dest_af) &&
ip_vs_addr_equal(dest_af, &dest->addr, daddr) &&
(dest->port == dport)) {
/* HIT */
return dest;
}
}
return NULL;
}
/*
* Find destination by {daddr,dport,vaddr,protocol}
* Created to be used in ip_vs_process_message() in
* the backup synchronization daemon. It finds the
* destination to be bound to the received connection
* on the backup.
* Called under RCU lock, no refcnt is returned.
*/
struct ip_vs_dest *ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
__be16 vport, __u16 protocol, __u32 fwmark,
__u32 flags)
{
struct ip_vs_dest *dest;
struct ip_vs_service *svc;
__be16 port = dport;
svc = ip_vs_service_find(ipvs, svc_af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
port = 0;
dest = ip_vs_lookup_dest(svc, dest_af, daddr, port);
if (!dest)
dest = ip_vs_lookup_dest(svc, dest_af, daddr, port ^ dport);
return dest;
}
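/* RCU callback: release the cached route and free the dest_dst container */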
void ip_vs_dest_dst_rcu_free(struct rcu_head *head)
{
struct ip_vs_dest_dst *dest_dst = container_of(head,
struct ip_vs_dest_dst,
rcu_head);
dst_release(dest_dst->dst_cache);
kfree(dest_dst);
}
/* Release dest_dst and dst_cache for dest in user context */
static void __ip_vs_dst_cache_reset(struct ip_vs_dest *dest)
{
struct ip_vs_dest_dst *old;
old = rcu_dereference_protected(dest->dest_dst, 1);
if (old) {
RCU_INIT_POINTER(dest->dest_dst, NULL);
call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
}
}
/*
* Lookup dest by {svc,addr,port} in the destination trash.
* The destination trash is used to hold the destinations that are removed
* from the service table but are still referenced by some conn entries.
 * The reason for the destination trash is that when a dest is temporarily
 * taken down (either by the administrator or by a monitor program), it can
 * be picked back from the trash, the remaining connections to the dest can
 * continue, and the counting information of the dest is still useful for
 * scheduling.
*/
static struct ip_vs_dest *
ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af,
const union nf_inet_addr *daddr, __be16 dport)
{
struct ip_vs_dest *dest;
struct netns_ipvs *ipvs = svc->ipvs;
/*
* Find the destination in trash
*/
spin_lock_bh(&ipvs->dest_trash_lock);
list_for_each_entry(dest, &ipvs->dest_trash, t_list) {
IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
"dest->refcnt=%d\n",
dest->vfwmark,
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
refcount_read(&dest->refcnt));
if (dest->af == dest_af &&
ip_vs_addr_equal(dest_af, &dest->addr, daddr) &&
dest->port == dport &&
dest->vfwmark == svc->fwmark &&
dest->protocol == svc->protocol &&
(svc->fwmark ||
(ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) &&
dest->vport == svc->port))) {
/* HIT */
list_del(&dest->t_list);
goto out;
}
}
dest = NULL;
out:
spin_unlock_bh(&ipvs->dest_trash_lock);
return dest;
}
static void ip_vs_dest_rcu_free(struct rcu_head *head)
{
struct ip_vs_dest *dest;
dest = container_of(head, struct ip_vs_dest, rcu_head);
ip_vs_stats_release(&dest->stats);
ip_vs_dest_put_and_free(dest);
}
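/* Release a destination: reset its route cache, drop the bound service and
 * free the dest after an RCU grace period.
 */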
static void ip_vs_dest_free(struct ip_vs_dest *dest)
{
struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1);
__ip_vs_dst_cache_reset(dest);
__ip_vs_svc_put(svc);
call_rcu(&dest->rcu_head, ip_vs_dest_rcu_free);
}
/*
* Clean up all the destinations in the trash
 * Called by ip_vs_control_cleanup()
 *
 * When ip_vs_control_cleanup() is activated by the ipvs module exit,
* the service tables must have been flushed and all the connections
* are expired, and the refcnt of each destination in the trash must
* be 1, so we simply release them here.
*/
static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs)
{
struct ip_vs_dest *dest, *nxt;
del_timer_sync(&ipvs->dest_trash_timer);
/* No need to use dest_trash_lock */
list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, t_list) {
list_del(&dest->t_list);
ip_vs_dest_free(dest);
}
}
static void ip_vs_stats_rcu_free(struct rcu_head *head)
{
struct ip_vs_stats_rcu *rs = container_of(head,
struct ip_vs_stats_rcu,
rcu_head);
ip_vs_stats_release(&rs->s);
kfree(rs);
}
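/* Snapshot the counters relative to the last zeroing point and read the
 * current rate estimates, all under the stats lock.
 */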
static void
ip_vs_copy_stats(struct ip_vs_kstats *dst, struct ip_vs_stats *src)
{
#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->kstats.c - src->kstats0.c
spin_lock(&src->lock);
IP_VS_SHOW_STATS_COUNTER(conns);
IP_VS_SHOW_STATS_COUNTER(inpkts);
IP_VS_SHOW_STATS_COUNTER(outpkts);
IP_VS_SHOW_STATS_COUNTER(inbytes);
IP_VS_SHOW_STATS_COUNTER(outbytes);
ip_vs_read_estimator(dst, src);
spin_unlock(&src->lock);
}
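/* Convert the 64-bit kernel stats to the user-space layout; counters and
 * rates other than the byte totals are truncated to 32 bits.
 */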
static void
ip_vs_export_stats_user(struct ip_vs_stats_user *dst, struct ip_vs_kstats *src)
{
dst->conns = (u32)src->conns;
dst->inpkts = (u32)src->inpkts;
dst->outpkts = (u32)src->outpkts;
dst->inbytes = src->inbytes;
dst->outbytes = src->outbytes;
dst->cps = (u32)src->cps;
dst->inpps = (u32)src->inpps;
dst->outpps = (u32)src->outpps;
dst->inbps = (u32)src->inbps;
dst->outbps = (u32)src->outbps;
}
static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
spin_lock(&stats->lock);
/* get current counters as zero point, rates are zeroed */
#define IP_VS_ZERO_STATS_COUNTER(c) stats->kstats0.c = stats->kstats.c
IP_VS_ZERO_STATS_COUNTER(conns);
IP_VS_ZERO_STATS_COUNTER(inpkts);
IP_VS_ZERO_STATS_COUNTER(outpkts);
IP_VS_ZERO_STATS_COUNTER(inbytes);
IP_VS_ZERO_STATS_COUNTER(outbytes);
ip_vs_zero_estimator(stats);
spin_unlock(&stats->lock);
}
/* Allocate fields after kzalloc */
int ip_vs_stats_init_alloc(struct ip_vs_stats *s)
{
int i;
spin_lock_init(&s->lock);
s->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!s->cpustats)
return -ENOMEM;
for_each_possible_cpu(i) {
struct ip_vs_cpu_stats *cs = per_cpu_ptr(s->cpustats, i);
u64_stats_init(&cs->syncp);
}
return 0;
}
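/* Allocate and initialize a stats object, including its per-CPU counters */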
struct ip_vs_stats *ip_vs_stats_alloc(void)
{
struct ip_vs_stats *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (s && ip_vs_stats_init_alloc(s) >= 0)
return s;
kfree(s);
return NULL;
}
void ip_vs_stats_release(struct ip_vs_stats *stats)
{
free_percpu(stats->cpustats);
}
void ip_vs_stats_free(struct ip_vs_stats *stats)
{
if (stats) {
ip_vs_stats_release(stats);
kfree(stats);
}
}
/*
* Update a destination in the given service
*/
static void
__ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
struct ip_vs_dest_user_kern *udest, int add)
{
struct netns_ipvs *ipvs = svc->ipvs;
struct ip_vs_service *old_svc;
struct ip_vs_scheduler *sched;
int conn_flags;
/* We cannot modify an address and change the address family */
BUG_ON(!add && udest->af != dest->af);
if (add && udest->af != svc->af)
ipvs->mixed_address_family_dests++;
/* keep the last_weight with latest non-0 weight */
if (add || udest->weight != 0)
atomic_set(&dest->last_weight, udest->weight);
/* set the weight and the flags */
atomic_set(&dest->weight, udest->weight);
conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
conn_flags |= IP_VS_CONN_F_INACTIVE;
/* Need to rehash? */
if ((udest->conn_flags & IP_VS_CONN_F_FWD_MASK) !=
IP_VS_DFWD_METHOD(dest) ||
udest->tun_type != dest->tun_type ||
udest->tun_port != dest->tun_port)
ip_vs_rs_unhash(dest);
/* set the tunnel info */
dest->tun_type = udest->tun_type;
dest->tun_port = udest->tun_port;
dest->tun_flags = udest->tun_flags;
/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) {
conn_flags |= IP_VS_CONN_F_NOOUTPUT;
} else {
/* FTP-NAT requires conntrack for mangling */
if (svc->port == FTPPORT)
ip_vs_register_conntrack(svc);
}
atomic_set(&dest->conn_flags, conn_flags);
/* Put the real service in rs_table if not present. */
ip_vs_rs_hash(ipvs, dest);
/* bind the service */
old_svc = rcu_dereference_protected(dest->svc, 1);
if (!old_svc) {
__ip_vs_bind_svc(dest, svc);
} else {
if (old_svc != svc) {
ip_vs_zero_stats(&dest->stats);
__ip_vs_bind_svc(dest, svc);
__ip_vs_svc_put(old_svc);
}
}
/* set the dest status flags */
dest->flags |= IP_VS_DEST_F_AVAILABLE;
if (udest->u_threshold == 0 || udest->u_threshold > dest->u_threshold)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
dest->u_threshold = udest->u_threshold;
dest->l_threshold = udest->l_threshold;
dest->af = udest->af;
spin_lock_bh(&dest->dst_lock);
__ip_vs_dst_cache_reset(dest);
spin_unlock_bh(&dest->dst_lock);
if (add) {
list_add_rcu(&dest->n_list, &svc->destinations);
svc->num_dests++;
sched = rcu_dereference_protected(svc->scheduler, 1);
if (sched && sched->add_dest)
sched->add_dest(svc, dest);
} else {
sched = rcu_dereference_protected(svc->scheduler, 1);
if (sched && sched->upd_dest)
sched->upd_dest(svc, dest);
}
}
/*
* Create a destination for the given service
*/
static int
ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
unsigned int atype;
int ret;
#ifdef CONFIG_IP_VS_IPV6
if (udest->af == AF_INET6) {
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
!__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
return -EINVAL;
ret = nf_defrag_ipv6_enable(svc->ipvs->net);
if (ret)
return ret;
} else
#endif
{
atype = inet_addr_type(svc->ipvs->net, udest->addr.ip);
if (atype != RTN_LOCAL && atype != RTN_UNICAST)
return -EINVAL;
}
dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
if (dest == NULL)
return -ENOMEM;
ret = ip_vs_stats_init_alloc(&dest->stats);
if (ret < 0)
goto err_alloc;
ret = ip_vs_start_estimator(svc->ipvs, &dest->stats);
if (ret < 0)
goto err_stats;
dest->af = udest->af;
dest->protocol = svc->protocol;
dest->vaddr = svc->addr;
dest->vport = svc->port;
dest->vfwmark = svc->fwmark;
ip_vs_addr_copy(udest->af, &dest->addr, &udest->addr);
dest->port = udest->port;
atomic_set(&dest->activeconns, 0);
atomic_set(&dest->inactconns, 0);
atomic_set(&dest->persistconns, 0);
refcount_set(&dest->refcnt, 1);
INIT_HLIST_NODE(&dest->d_list);
spin_lock_init(&dest->dst_lock);
__ip_vs_update_dest(svc, dest, udest, 1);
return 0;
err_stats:
ip_vs_stats_release(&dest->stats);
err_alloc:
kfree(dest);
return ret;
}
/*
* Add a destination into an existing service
*/
static int
ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
union nf_inet_addr daddr;
__be16 dport = udest->port;
int ret;
if (udest->weight < 0) {
pr_err("%s(): server weight less than zero\n", __func__);
return -ERANGE;
}
if (udest->l_threshold > udest->u_threshold) {
pr_err("%s(): lower threshold is higher than upper threshold\n",
__func__);
return -ERANGE;
}
if (udest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
if (udest->tun_port == 0) {
pr_err("%s(): tunnel port is zero\n", __func__);
return -EINVAL;
}
}
ip_vs_addr_copy(udest->af, &daddr, &udest->addr);
	/* We use a function that requires the RCU lock */
rcu_read_lock();
dest = ip_vs_lookup_dest(svc, udest->af, &daddr, dport);
rcu_read_unlock();
if (dest != NULL) {
IP_VS_DBG(1, "%s(): dest already exists\n", __func__);
return -EEXIST;
}
/*
* Check if the dest already exists in the trash and
* is from the same service
*/
dest = ip_vs_trash_get_dest(svc, udest->af, &daddr, dport);
if (dest != NULL) {
IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
"dest->refcnt=%d, service %u/%s:%u\n",
IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport),
refcount_read(&dest->refcnt),
dest->vfwmark,
IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
ntohs(dest->vport));
ret = ip_vs_start_estimator(svc->ipvs, &dest->stats);
if (ret < 0)
return ret;
__ip_vs_update_dest(svc, dest, udest, 1);
} else {
/*
* Allocate and initialize the dest structure
*/
ret = ip_vs_new_dest(svc, udest);
}
return ret;
}
/*
* Edit a destination in the given service
*/
static int
ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
union nf_inet_addr daddr;
__be16 dport = udest->port;
if (udest->weight < 0) {
pr_err("%s(): server weight less than zero\n", __func__);
return -ERANGE;
}
if (udest->l_threshold > udest->u_threshold) {
pr_err("%s(): lower threshold is higher than upper threshold\n",
__func__);
return -ERANGE;
}
if (udest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
if (udest->tun_port == 0) {
pr_err("%s(): tunnel port is zero\n", __func__);
return -EINVAL;
}
}
ip_vs_addr_copy(udest->af, &daddr, &udest->addr);
	/* We use a function that requires the RCU lock */
rcu_read_lock();
dest = ip_vs_lookup_dest(svc, udest->af, &daddr, dport);
rcu_read_unlock();
if (dest == NULL) {
IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__);
return -ENOENT;
}
__ip_vs_update_dest(svc, dest, udest, 0);
return 0;
}
/*
* Delete a destination (must be already unlinked from the service)
*/
static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest,
bool cleanup)
{
ip_vs_stop_estimator(ipvs, &dest->stats);
/*
* Remove it from the d-linked list with the real services.
*/
ip_vs_rs_unhash(dest);
spin_lock_bh(&ipvs->dest_trash_lock);
IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
refcount_read(&dest->refcnt));
if (list_empty(&ipvs->dest_trash) && !cleanup)
mod_timer(&ipvs->dest_trash_timer,
jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
/* dest lives in trash with reference */
list_add(&dest->t_list, &ipvs->dest_trash);
dest->idle_start = 0;
spin_unlock_bh(&ipvs->dest_trash_lock);
	/* Queue up delayed work to expire all no-destination connections.
* No-op when CONFIG_SYSCTL is disabled.
*/
if (!cleanup)
ip_vs_enqueue_expire_nodest_conns(ipvs);
}
/*
* Unlink a destination from the given service
*/
static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
struct ip_vs_dest *dest,
int svcupd)
{
dest->flags &= ~IP_VS_DEST_F_AVAILABLE;
/*
* Remove it from the d-linked destination list.
*/
list_del_rcu(&dest->n_list);
svc->num_dests--;
if (dest->af != svc->af)
svc->ipvs->mixed_address_family_dests--;
if (svcupd) {
struct ip_vs_scheduler *sched;
sched = rcu_dereference_protected(svc->scheduler, 1);
if (sched && sched->del_dest)
sched->del_dest(svc, dest);
}
}
/*
* Delete a destination server in the given service
*/
static int
ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
__be16 dport = udest->port;
	/* We use a function that requires the RCU lock */
rcu_read_lock();
dest = ip_vs_lookup_dest(svc, udest->af, &udest->addr, dport);
rcu_read_unlock();
if (dest == NULL) {
IP_VS_DBG(1, "%s(): destination not found!\n", __func__);
return -ENOENT;
}
/*
* Unlink dest from the service
*/
__ip_vs_unlink_dest(svc, dest, 1);
/*
* Delete the destination
*/
__ip_vs_del_dest(svc->ipvs, dest, false);
return 0;
}
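/* Trash timer: free destinations that have no remaining references and have
 * stayed idle in the trash for a full period; re-arm while entries remain.
 */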
static void ip_vs_dest_trash_expire(struct timer_list *t)
{
struct netns_ipvs *ipvs = from_timer(ipvs, t, dest_trash_timer);
struct ip_vs_dest *dest, *next;
unsigned long now = jiffies;
spin_lock(&ipvs->dest_trash_lock);
list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
if (refcount_read(&dest->refcnt) > 1)
continue;
if (dest->idle_start) {
if (time_before(now, dest->idle_start +
IP_VS_DEST_TRASH_PERIOD))
continue;
} else {
dest->idle_start = max(1UL, now);
continue;
}
IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
dest->vfwmark,
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port));
list_del(&dest->t_list);
ip_vs_dest_free(dest);
}
if (!list_empty(&ipvs->dest_trash))
mod_timer(&ipvs->dest_trash_timer,
jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
spin_unlock(&ipvs->dest_trash_lock);
}
/*
* Add a service into the service hash table
*/
static int
ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
struct ip_vs_service **svc_p)
{
int ret = 0;
struct ip_vs_scheduler *sched = NULL;
struct ip_vs_pe *pe = NULL;
struct ip_vs_service *svc = NULL;
int ret_hooks = -1;
/* increase the module use count */
if (!ip_vs_use_count_inc())
return -ENOPROTOOPT;
/* Lookup the scheduler by 'u->sched_name' */
if (strcmp(u->sched_name, "none")) {
sched = ip_vs_scheduler_get(u->sched_name);
if (!sched) {
pr_info("Scheduler module ip_vs_%s not found\n",
u->sched_name);
ret = -ENOENT;
goto out_err;
}
}
if (u->pe_name && *u->pe_name) {
pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
ret = -ENOENT;
goto out_err;
}
}
#ifdef CONFIG_IP_VS_IPV6
if (u->af == AF_INET6) {
__u32 plen = (__force __u32) u->netmask;
if (plen < 1 || plen > 128) {
ret = -EINVAL;
goto out_err;
}
ret = nf_defrag_ipv6_enable(ipvs->net);
if (ret)
goto out_err;
}
#endif
if ((u->af == AF_INET && !ipvs->num_services) ||
(u->af == AF_INET6 && !ipvs->num_services6)) {
ret = ip_vs_register_hooks(ipvs, u->af);
if (ret < 0)
goto out_err;
ret_hooks = ret;
}
svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL);
if (svc == NULL) {
IP_VS_DBG(1, "%s(): no memory\n", __func__);
ret = -ENOMEM;
goto out_err;
}
ret = ip_vs_stats_init_alloc(&svc->stats);
if (ret < 0)
goto out_err;
/* I'm the first user of the service */
atomic_set(&svc->refcnt, 0);
svc->af = u->af;
svc->protocol = u->protocol;
ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
svc->port = u->port;
svc->fwmark = u->fwmark;
svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
svc->ipvs = ipvs;
INIT_LIST_HEAD(&svc->destinations);
spin_lock_init(&svc->sched_lock);
/* Bind the scheduler */
if (sched) {
ret = ip_vs_bind_scheduler(svc, sched);
if (ret)
goto out_err;
sched = NULL;
}
ret = ip_vs_start_estimator(ipvs, &svc->stats);
if (ret < 0)
goto out_err;
/* Bind the ct retriever */
RCU_INIT_POINTER(svc->pe, pe);
pe = NULL;
/* Update the virtual service counters */
if (svc->port == FTPPORT)
atomic_inc(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
atomic_inc(&ipvs->nullsvc_counter);
if (svc->pe && svc->pe->conn_out)
atomic_inc(&ipvs->conn_out_counter);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
ipvs->num_services++;
else if (svc->af == AF_INET6)
ipvs->num_services6++;
/* Hash the service into the service table */
ip_vs_svc_hash(svc);
*svc_p = svc;
if (!ipvs->enable) {
/* Now there is a service - full throttle */
ipvs->enable = 1;
/* Start estimation for first time */
ip_vs_est_reload_start(ipvs);
}
return 0;
out_err:
if (ret_hooks >= 0)
ip_vs_unregister_hooks(ipvs, u->af);
if (svc != NULL) {
ip_vs_unbind_scheduler(svc, sched);
ip_vs_service_free(svc);
}
ip_vs_scheduler_put(sched);
ip_vs_pe_put(pe);
/* decrease the module use count */
ip_vs_use_count_dec();
return ret;
}
/*
* Edit a service and bind it with a new scheduler
*/
static int
ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
{
struct ip_vs_scheduler *sched = NULL, *old_sched;
struct ip_vs_pe *pe = NULL, *old_pe = NULL;
int ret = 0;
bool new_pe_conn_out, old_pe_conn_out;
/*
* Lookup the scheduler, by 'u->sched_name'
*/
if (strcmp(u->sched_name, "none")) {
sched = ip_vs_scheduler_get(u->sched_name);
if (!sched) {
pr_info("Scheduler module ip_vs_%s not found\n",
u->sched_name);
return -ENOENT;
}
}
old_sched = sched;
if (u->pe_name && *u->pe_name) {
pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
ret = -ENOENT;
goto out;
}
old_pe = pe;
}
#ifdef CONFIG_IP_VS_IPV6
if (u->af == AF_INET6) {
__u32 plen = (__force __u32) u->netmask;
if (plen < 1 || plen > 128) {
ret = -EINVAL;
goto out;
}
}
#endif
old_sched = rcu_dereference_protected(svc->scheduler, 1);
if (sched != old_sched) {
if (old_sched) {
ip_vs_unbind_scheduler(svc, old_sched);
RCU_INIT_POINTER(svc->scheduler, NULL);
/* Wait all svc->sched_data users */
synchronize_rcu();
}
/* Bind the new scheduler */
if (sched) {
ret = ip_vs_bind_scheduler(svc, sched);
if (ret) {
ip_vs_scheduler_put(sched);
goto out;
}
}
}
/*
* Set the flags and timeout value
*/
svc->flags = u->flags | IP_VS_SVC_F_HASHED;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
old_pe = rcu_dereference_protected(svc->pe, 1);
if (pe != old_pe) {
rcu_assign_pointer(svc->pe, pe);
/* check for optional methods in new pe */
new_pe_conn_out = (pe && pe->conn_out) ? true : false;
old_pe_conn_out = (old_pe && old_pe->conn_out) ? true : false;
if (new_pe_conn_out && !old_pe_conn_out)
atomic_inc(&svc->ipvs->conn_out_counter);
if (old_pe_conn_out && !new_pe_conn_out)
atomic_dec(&svc->ipvs->conn_out_counter);
}
out:
ip_vs_scheduler_put(old_sched);
ip_vs_pe_put(old_pe);
return ret;
}
/*
* Delete a service from the service list
* - The service must be unlinked, unlocked and not referenced!
* - We are called under _bh lock
*/
static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
{
struct ip_vs_dest *dest, *nxt;
struct ip_vs_scheduler *old_sched;
struct ip_vs_pe *old_pe;
struct netns_ipvs *ipvs = svc->ipvs;
if (svc->af == AF_INET) {
ipvs->num_services--;
if (!ipvs->num_services)
ip_vs_unregister_hooks(ipvs, svc->af);
} else if (svc->af == AF_INET6) {
ipvs->num_services6--;
if (!ipvs->num_services6)
ip_vs_unregister_hooks(ipvs, svc->af);
}
ip_vs_stop_estimator(svc->ipvs, &svc->stats);
/* Unbind scheduler */
old_sched = rcu_dereference_protected(svc->scheduler, 1);
ip_vs_unbind_scheduler(svc, old_sched);
ip_vs_scheduler_put(old_sched);
/* Unbind persistence engine, keep svc->pe */
old_pe = rcu_dereference_protected(svc->pe, 1);
if (old_pe && old_pe->conn_out)
atomic_dec(&ipvs->conn_out_counter);
ip_vs_pe_put(old_pe);
/*
* Unlink the whole destination list
*/
list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
__ip_vs_unlink_dest(svc, dest, 0);
__ip_vs_del_dest(svc->ipvs, dest, cleanup);
}
/*
* Update the virtual service counters
*/
if (svc->port == FTPPORT)
atomic_dec(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
atomic_dec(&ipvs->nullsvc_counter);
/*
* Free the service if nobody refers to it
*/
__ip_vs_svc_put(svc);
/* decrease the module use count */
ip_vs_use_count_dec();
}
/*
* Unlink a service from list and try to delete it if its refcnt reached 0
*/
static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup)
{
ip_vs_unregister_conntrack(svc);
/* Hold svc to avoid double release from dest_trash */
atomic_inc(&svc->refcnt);
/*
* Unhash it from the service table
*/
ip_vs_svc_unhash(svc);
__ip_vs_del_service(svc, cleanup);
}
/*
* Delete a service from the service list
*/
static int ip_vs_del_service(struct ip_vs_service *svc)
{
if (svc == NULL)
return -EEXIST;
ip_vs_unlink_service(svc, false);
return 0;
}
/*
* Flush all the virtual services
*/
static int ip_vs_flush(struct netns_ipvs *ipvs, bool cleanup)
{
int idx;
struct ip_vs_service *svc;
struct hlist_node *n;
/*
* Flush the service table hashed by <netns,protocol,addr,port>
*/
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry_safe(svc, n, &ip_vs_svc_table[idx],
s_list) {
if (svc->ipvs == ipvs)
ip_vs_unlink_service(svc, cleanup);
}
}
/*
* Flush the service table hashed by fwmark
*/
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry_safe(svc, n, &ip_vs_svc_fwm_table[idx],
f_list) {
if (svc->ipvs == ipvs)
ip_vs_unlink_service(svc, cleanup);
}
}
return 0;
}
/*
* Delete service by {netns} in the service table.
* Called by __ip_vs_batch_cleanup()
*/
void ip_vs_service_nets_cleanup(struct list_head *net_list)
{
struct netns_ipvs *ipvs;
struct net *net;
/* Check for "full" addressed entries */
mutex_lock(&__ip_vs_mutex);
list_for_each_entry(net, net_list, exit_list) {
ipvs = net_ipvs(net);
ip_vs_flush(ipvs, true);
}
mutex_unlock(&__ip_vs_mutex);
}
/* Put all references for device (dst_cache) */
static inline void
ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev)
{
struct ip_vs_dest_dst *dest_dst;
spin_lock_bh(&dest->dst_lock);
dest_dst = rcu_dereference_protected(dest->dest_dst, 1);
if (dest_dst && dest_dst->dst_cache->dev == dev) {
IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n",
dev->name,
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
refcount_read(&dest->refcnt));
__ip_vs_dst_cache_reset(dest);
}
spin_unlock_bh(&dest->dst_lock);
}
/* Netdev event receiver
* Currently only NETDEV_DOWN is handled to release refs to cached dsts
*/
static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
unsigned int idx;
if (event != NETDEV_DOWN || !ipvs)
return NOTIFY_DONE;
IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
mutex_lock(&__ip_vs_mutex);
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (svc->ipvs == ipvs) {
list_for_each_entry(dest, &svc->destinations,
n_list) {
ip_vs_forget_dev(dest, dev);
}
}
}
hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (svc->ipvs == ipvs) {
list_for_each_entry(dest, &svc->destinations,
n_list) {
ip_vs_forget_dev(dest, dev);
}
}
}
}
spin_lock_bh(&ipvs->dest_trash_lock);
list_for_each_entry(dest, &ipvs->dest_trash, t_list) {
ip_vs_forget_dev(dest, dev);
}
spin_unlock_bh(&ipvs->dest_trash_lock);
mutex_unlock(&__ip_vs_mutex);
return NOTIFY_DONE;
}
/*
* Zero counters in a service or all services
*/
static int ip_vs_zero_service(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
list_for_each_entry(dest, &svc->destinations, n_list) {
ip_vs_zero_stats(&dest->stats);
}
ip_vs_zero_stats(&svc->stats);
return 0;
}
static int ip_vs_zero_all(struct netns_ipvs *ipvs)
{
int idx;
struct ip_vs_service *svc;
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (svc->ipvs == ipvs)
ip_vs_zero_service(svc);
}
}
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (svc->ipvs == ipvs)
ip_vs_zero_service(svc);
}
}
ip_vs_zero_stats(&ipvs->tot_stats->s);
return 0;
}
#ifdef CONFIG_SYSCTL
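/* sysctl handler for drop_entry/drop_packet/secure_tcp: only values 0-3 are
 * accepted and the defense level is re-evaluated on change.
 */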
static int
proc_do_defense_mode(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
int *valp = table->data;
int val = *valp;
int rc;
struct ctl_table tmp = {
.data = &val,
.maxlen = sizeof(int),
.mode = table->mode,
};
rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if (val < 0 || val > 3) {
rc = -EINVAL;
} else {
*valp = val;
update_defense_level(ipvs);
}
}
return rc;
}
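/* sysctl handler for sync_threshold: both values must be non-negative and
 * the threshold must stay below a non-zero period.
 */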
static int
proc_do_sync_threshold(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
int *valp = table->data;
int val[2];
int rc;
struct ctl_table tmp = {
.data = &val,
.maxlen = table->maxlen,
.mode = table->mode,
};
mutex_lock(&ipvs->sync_mutex);
memcpy(val, valp, sizeof(val));
rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
if (write) {
if (val[0] < 0 || val[1] < 0 ||
(val[0] >= val[1] && val[1]))
rc = -EINVAL;
else
memcpy(valp, val, sizeof(val));
}
mutex_unlock(&ipvs->sync_mutex);
return rc;
}
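/* sysctl handler for sync_ports: only powers of two (>= 1) are accepted */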
static int
proc_do_sync_ports(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val = *valp;
int rc;
struct ctl_table tmp = {
.data = &val,
.maxlen = sizeof(int),
.mode = table->mode,
};
rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if (val < 1 || !is_power_of_2(val))
rc = -EINVAL;
else
*valp = val;
}
return rc;
}
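/* Parse and install a new est_cpulist, restricted to the CPUs the writer is
 * allowed to run on, then restart the estimator kthreads.
 */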
static int ipvs_proc_est_cpumask_set(struct ctl_table *table, void *buffer)
{
struct netns_ipvs *ipvs = table->extra2;
cpumask_var_t *valp = table->data;
cpumask_var_t newmask;
int ret;
if (!zalloc_cpumask_var(&newmask, GFP_KERNEL))
return -ENOMEM;
ret = cpulist_parse(buffer, newmask);
if (ret)
goto out;
mutex_lock(&ipvs->est_mutex);
if (!ipvs->est_cpulist_valid) {
if (!zalloc_cpumask_var(valp, GFP_KERNEL)) {
ret = -ENOMEM;
goto unlock;
}
ipvs->est_cpulist_valid = 1;
}
cpumask_and(newmask, newmask, ¤t->cpus_mask);
cpumask_copy(*valp, newmask);
/* est_max_threads may depend on cpulist size */
ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
ipvs->est_calc_phase = 1;
ip_vs_est_reload_start(ipvs);
unlock:
mutex_unlock(&ipvs->est_mutex);
out:
free_cpumask_var(newmask);
return ret;
}
static int ipvs_proc_est_cpumask_get(struct ctl_table *table, void *buffer,
size_t size)
{
struct netns_ipvs *ipvs = table->extra2;
cpumask_var_t *valp = table->data;
struct cpumask *mask;
int ret;
mutex_lock(&ipvs->est_mutex);
if (ipvs->est_cpulist_valid)
mask = *valp;
else
mask = (struct cpumask *)housekeeping_cpumask(HK_TYPE_KTHREAD);
ret = scnprintf(buffer, size, "%*pbl\n", cpumask_pr_args(mask));
mutex_unlock(&ipvs->est_mutex);
return ret;
}
static int ipvs_proc_est_cpulist(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
	/* Ignore both read and write (append) if *ppos is not 0 */
if (*ppos || !*lenp) {
*lenp = 0;
return 0;
}
if (write) {
/* proc_sys_call_handler() appends terminator */
ret = ipvs_proc_est_cpumask_set(table, buffer);
if (ret >= 0)
*ppos += *lenp;
} else {
/* proc_sys_call_handler() allocates 1 byte for terminator */
ret = ipvs_proc_est_cpumask_get(table, buffer, *lenp + 1);
if (ret >= 0) {
*lenp = ret;
*ppos += *lenp;
ret = 0;
}
}
return ret;
}
static int ipvs_proc_est_nice(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
int *valp = table->data;
int val = *valp;
int ret;
struct ctl_table tmp_table = {
.data = &val,
.maxlen = sizeof(int),
.mode = table->mode,
};
ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
if (write && ret >= 0) {
if (val < MIN_NICE || val > MAX_NICE) {
ret = -EINVAL;
} else {
mutex_lock(&ipvs->est_mutex);
if (*valp != val) {
*valp = val;
ip_vs_est_reload_start(ipvs);
}
mutex_unlock(&ipvs->est_mutex);
}
}
return ret;
}
static int ipvs_proc_run_estimation(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
int *valp = table->data;
int val = *valp;
int ret;
struct ctl_table tmp_table = {
.data = &val,
.maxlen = sizeof(int),
.mode = table->mode,
};
ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
if (write && ret >= 0) {
mutex_lock(&ipvs->est_mutex);
if (*valp != val) {
*valp = val;
ip_vs_est_reload_start(ipvs);
}
mutex_unlock(&ipvs->est_mutex);
}
return ret;
}
/*
* IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
 * Do not change the order or insert new entries without
 * aligning them with the netns init in ip_vs_control_net_init()
*/
static struct ctl_table vs_vars[] = {
{
.procname = "amemthresh",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "am_droprate",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "drop_entry",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "drop_packet",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
#ifdef CONFIG_IP_VS_NFCT
{
.procname = "conntrack",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
{
.procname = "secure_tcp",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "snat_reroute",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.procname = "sync_version",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "sync_ports",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_sync_ports,
},
{
.procname = "sync_persist_mode",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sync_qlen_max",
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "sync_sock_size",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "cache_bypass",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "expire_nodest_conn",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sloppy_tcp",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sloppy_sctp",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "expire_quiescent_template",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sync_threshold",
.maxlen =
sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
.mode = 0644,
.proc_handler = proc_do_sync_threshold,
},
{
.procname = "sync_refresh_period",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "sync_retries",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_THREE,
},
{
.procname = "nat_icmp_send",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "pmtu_disc",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "backup_only",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "conn_reuse_mode",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "schedule_icmp",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ignore_tunneled",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "run_estimation",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = ipvs_proc_run_estimation,
},
{
.procname = "est_cpulist",
.maxlen = NR_CPUS, /* unused */
.mode = 0644,
.proc_handler = ipvs_proc_est_cpulist,
},
{
.procname = "est_nice",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = ipvs_proc_est_nice,
},
#ifdef CONFIG_IP_VS_DEBUG
{
.procname = "debug_level",
.data = &sysctl_ip_vs_debug_level,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
{ }
};
#endif
#ifdef CONFIG_PROC_FS
struct ip_vs_iter {
	struct seq_net_private p;  /* Do not move this, netns depends upon it */
struct hlist_head *table;
int bucket;
};
/*
* Write the contents of the VS rule table to a PROCfs file.
* (It is kept just for backward compatibility)
*/
static inline const char *ip_vs_fwd_name(unsigned int flags)
{
switch (flags & IP_VS_CONN_F_FWD_MASK) {
case IP_VS_CONN_F_LOCALNODE:
return "Local";
case IP_VS_CONN_F_TUNNEL:
return "Tunnel";
case IP_VS_CONN_F_DROUTE:
return "Route";
default:
return "Masq";
}
}
/* Get the Nth entry in the two lists */
static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
{
struct net *net = seq_file_net(seq);
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_iter *iter = seq->private;
int idx;
struct ip_vs_service *svc;
/* look in hash by protocol */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[idx], s_list) {
if ((svc->ipvs == ipvs) && pos-- == 0) {
iter->table = ip_vs_svc_table;
iter->bucket = idx;
return svc;
}
}
}
/* keep looking in fwmark */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[idx],
f_list) {
if ((svc->ipvs == ipvs) && pos-- == 0) {
iter->table = ip_vs_svc_fwm_table;
iter->bucket = idx;
return svc;
}
}
}
return NULL;
}
static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return *pos ? ip_vs_info_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct hlist_node *e;
struct ip_vs_iter *iter;
struct ip_vs_service *svc;
++*pos;
if (v == SEQ_START_TOKEN)
return ip_vs_info_array(seq,0);
svc = v;
iter = seq->private;
if (iter->table == ip_vs_svc_table) {
/* next service in table hashed by protocol */
e = rcu_dereference(hlist_next_rcu(&svc->s_list));
if (e)
return hlist_entry(e, struct ip_vs_service, s_list);
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
hlist_for_each_entry_rcu(svc,
&ip_vs_svc_table[iter->bucket],
s_list) {
return svc;
}
}
iter->table = ip_vs_svc_fwm_table;
iter->bucket = -1;
goto scan_fwmark;
}
/* next service in hashed by fwmark */
e = rcu_dereference(hlist_next_rcu(&svc->f_list));
if (e)
return hlist_entry(e, struct ip_vs_service, f_list);
scan_fwmark:
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
hlist_for_each_entry_rcu(svc,
&ip_vs_svc_fwm_table[iter->bucket],
f_list)
return svc;
}
return NULL;
}
static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"IP Virtual Server version %d.%d.%d (size=%d)\n",
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
seq_puts(seq,
"Prot LocalAddress:Port Scheduler Flags\n");
seq_puts(seq,
" -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n");
} else {
struct net *net = seq_file_net(seq);
struct netns_ipvs *ipvs = net_ipvs(net);
const struct ip_vs_service *svc = v;
const struct ip_vs_iter *iter = seq->private;
const struct ip_vs_dest *dest;
struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
char *sched_name = sched ? sched->name : "none";
if (svc->ipvs != ipvs)
return 0;
if (iter->table == ip_vs_svc_table) {
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
seq_printf(seq, "%s [%pI6]:%04X %s ",
ip_vs_proto_name(svc->protocol),
&svc->addr.in6,
ntohs(svc->port),
sched_name);
else
#endif
seq_printf(seq, "%s %08X:%04X %s %s ",
ip_vs_proto_name(svc->protocol),
ntohl(svc->addr.ip),
ntohs(svc->port),
sched_name,
(svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
} else {
seq_printf(seq, "FWM %08X %s %s",
svc->fwmark, sched_name,
(svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
}
if (svc->flags & IP_VS_SVC_F_PERSISTENT)
seq_printf(seq, "persistent %d %08X\n",
svc->timeout,
ntohl(svc->netmask));
else
seq_putc(seq, '\n');
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
#ifdef CONFIG_IP_VS_IPV6
if (dest->af == AF_INET6)
seq_printf(seq,
" -> [%pI6]:%04X"
" %-7s %-6d %-10d %-10d\n",
&dest->addr.in6,
ntohs(dest->port),
ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
atomic_read(&dest->weight),
atomic_read(&dest->activeconns),
atomic_read(&dest->inactconns));
else
#endif
seq_printf(seq,
" -> %08X:%04X "
"%-7s %-6d %-10d %-10d\n",
ntohl(dest->addr.ip),
ntohs(dest->port),
ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
atomic_read(&dest->weight),
atomic_read(&dest->activeconns),
atomic_read(&dest->inactconns));
}
}
return 0;
}
static const struct seq_operations ip_vs_info_seq_ops = {
.start = ip_vs_info_seq_start,
.next = ip_vs_info_seq_next,
.stop = ip_vs_info_seq_stop,
.show = ip_vs_info_seq_show,
};
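/* Show the netns-wide totals followed by the per-second rate estimates */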
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
struct ip_vs_kstats show;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Total Incoming Outgoing Incoming Outgoing\n");
seq_puts(seq,
" Conns Packets Packets Bytes Bytes\n");
ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats->s);
seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n\n",
(unsigned long long)show.conns,
(unsigned long long)show.inpkts,
(unsigned long long)show.outpkts,
(unsigned long long)show.inbytes,
(unsigned long long)show.outbytes);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567*/
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n",
(unsigned long long)show.cps,
(unsigned long long)show.inpps,
(unsigned long long)show.outpps,
(unsigned long long)show.inbps,
(unsigned long long)show.outbps);
return 0;
}
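/* Show a per-CPU breakdown of the counters, then the aggregated totals and
 * the per-second rate estimates.
 */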
static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats->s;
struct ip_vs_cpu_stats __percpu *cpustats = tot_stats->cpustats;
struct ip_vs_kstats kstats;
int i;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Total Incoming Outgoing Incoming Outgoing\n");
seq_puts(seq,
"CPU Conns Packets Packets Bytes Bytes\n");
for_each_possible_cpu(i) {
struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i);
unsigned int start;
u64 conns, inpkts, outpkts, inbytes, outbytes;
do {
start = u64_stats_fetch_begin(&u->syncp);
conns = u64_stats_read(&u->cnt.conns);
inpkts = u64_stats_read(&u->cnt.inpkts);
outpkts = u64_stats_read(&u->cnt.outpkts);
inbytes = u64_stats_read(&u->cnt.inbytes);
outbytes = u64_stats_read(&u->cnt.outbytes);
} while (u64_stats_fetch_retry(&u->syncp, start));
seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
i, (u64)conns, (u64)inpkts,
(u64)outpkts, (u64)inbytes,
(u64)outbytes);
}
ip_vs_copy_stats(&kstats, tot_stats);
seq_printf(seq, " ~ %8LX %8LX %8LX %16LX %16LX\n\n",
(unsigned long long)kstats.conns,
(unsigned long long)kstats.inpkts,
(unsigned long long)kstats.outpkts,
(unsigned long long)kstats.inbytes,
(unsigned long long)kstats.outbytes);
/* ... 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq, " %8LX %8LX %8LX %16LX %16LX\n",
kstats.cps,
kstats.inpps,
kstats.outpps,
kstats.inbps,
kstats.outbps);
return 0;
}
#endif
/*
* Set timeout values for tcp tcpfin udp in the timeout_table.
*/
static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user *u)
{
#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
struct ip_vs_proto_data *pd;
#endif
IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
u->tcp_timeout,
u->tcp_fin_timeout,
u->udp_timeout);
#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
return -EINVAL;
}
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
return -EINVAL;
#endif
#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
= u->tcp_timeout * HZ;
}
if (u->tcp_fin_timeout) {
pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
= u->tcp_fin_timeout * HZ;
}
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (u->udp_timeout) {
pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
pd->timeout_table[IP_VS_UDP_S_NORMAL]
= u->udp_timeout * HZ;
}
#endif
return 0;
}
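/* Expected argument sizes for the legacy IP_VS_SO_SET_* sockopt commands,
 * indexed by command id relative to IP_VS_BASE_CTL.
 */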
#define CMDID(cmd) (cmd - IP_VS_BASE_CTL)
struct ip_vs_svcdest_user {
struct ip_vs_service_user s;
struct ip_vs_dest_user d;
};
static const unsigned char set_arglen[CMDID(IP_VS_SO_SET_MAX) + 1] = {
[CMDID(IP_VS_SO_SET_ADD)] = sizeof(struct ip_vs_service_user),
[CMDID(IP_VS_SO_SET_EDIT)] = sizeof(struct ip_vs_service_user),
[CMDID(IP_VS_SO_SET_DEL)] = sizeof(struct ip_vs_service_user),
[CMDID(IP_VS_SO_SET_ADDDEST)] = sizeof(struct ip_vs_svcdest_user),
[CMDID(IP_VS_SO_SET_DELDEST)] = sizeof(struct ip_vs_svcdest_user),
[CMDID(IP_VS_SO_SET_EDITDEST)] = sizeof(struct ip_vs_svcdest_user),
[CMDID(IP_VS_SO_SET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user),
[CMDID(IP_VS_SO_SET_STARTDAEMON)] = sizeof(struct ip_vs_daemon_user),
[CMDID(IP_VS_SO_SET_STOPDAEMON)] = sizeof(struct ip_vs_daemon_user),
[CMDID(IP_VS_SO_SET_ZERO)] = sizeof(struct ip_vs_service_user),
};
union ip_vs_set_arglen {
struct ip_vs_service_user field_IP_VS_SO_SET_ADD;
struct ip_vs_service_user field_IP_VS_SO_SET_EDIT;
struct ip_vs_service_user field_IP_VS_SO_SET_DEL;
struct ip_vs_svcdest_user field_IP_VS_SO_SET_ADDDEST;
struct ip_vs_svcdest_user field_IP_VS_SO_SET_DELDEST;
struct ip_vs_svcdest_user field_IP_VS_SO_SET_EDITDEST;
struct ip_vs_timeout_user field_IP_VS_SO_SET_TIMEOUT;
struct ip_vs_daemon_user field_IP_VS_SO_SET_STARTDAEMON;
struct ip_vs_daemon_user field_IP_VS_SO_SET_STOPDAEMON;
struct ip_vs_service_user field_IP_VS_SO_SET_ZERO;
};
#define MAX_SET_ARGLEN sizeof(union ip_vs_set_arglen)
static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc,
struct ip_vs_service_user *usvc_compat)
{
memset(usvc, 0, sizeof(*usvc));
usvc->af = AF_INET;
usvc->protocol = usvc_compat->protocol;
usvc->addr.ip = usvc_compat->addr;
usvc->port = usvc_compat->port;
usvc->fwmark = usvc_compat->fwmark;
/* Deep copy of sched_name is not needed here */
usvc->sched_name = usvc_compat->sched_name;
usvc->flags = usvc_compat->flags;
usvc->timeout = usvc_compat->timeout;
usvc->netmask = usvc_compat->netmask;
}
static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
struct ip_vs_dest_user *udest_compat)
{
memset(udest, 0, sizeof(*udest));
udest->addr.ip = udest_compat->addr;
udest->port = udest_compat->port;
udest->conn_flags = udest_compat->conn_flags;
udest->weight = udest_compat->weight;
udest->u_threshold = udest_compat->u_threshold;
udest->l_threshold = udest_compat->l_threshold;
udest->af = AF_INET;
udest->tun_type = IP_VS_CONN_F_TUNNEL_TYPE_IPIP;
}
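/* Legacy setsockopt entry point: validate the argument length, convert the
 * compat structures and dispatch the IP_VS_SO_SET_* command under
 * __ip_vs_mutex (sync daemon commands use their own locking).
 */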
static int
do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
{
struct net *net = sock_net(sk);
int ret;
unsigned char arg[MAX_SET_ARGLEN];
struct ip_vs_service_user *usvc_compat;
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
struct ip_vs_dest_user *udest_compat;
struct ip_vs_dest_user_kern udest;
struct netns_ipvs *ipvs = net_ipvs(net);
BUILD_BUG_ON(sizeof(arg) > 255);
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
return -EINVAL;
if (len != set_arglen[CMDID(cmd)]) {
IP_VS_DBG(1, "set_ctl: len %u != %u\n",
len, set_arglen[CMDID(cmd)]);
return -EINVAL;
}
if (copy_from_sockptr(arg, ptr, len) != 0)
return -EFAULT;
/* Handle daemons since they have another lock */
if (cmd == IP_VS_SO_SET_STARTDAEMON ||
cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
if (cmd == IP_VS_SO_SET_STARTDAEMON) {
struct ipvs_sync_daemon_cfg cfg;
memset(&cfg, 0, sizeof(cfg));
ret = -EINVAL;
if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
sizeof(cfg.mcast_ifn)) <= 0)
return ret;
cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state);
} else {
ret = stop_sync_thread(ipvs, dm->state);
}
return ret;
}
mutex_lock(&__ip_vs_mutex);
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
ret = ip_vs_flush(ipvs, false);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
} else if (!len) {
/* No more commands with len == 0 below */
ret = -EINVAL;
goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1);
/* We only use the new structs internally, so copy userspace compat
* structs to extended internal versions */
ip_vs_copy_usvc_compat(&usvc, usvc_compat);
ip_vs_copy_udest_compat(&udest, udest_compat);
if (cmd == IP_VS_SO_SET_ZERO) {
/* if no service address is set, zero counters in all */
if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
ret = ip_vs_zero_all(ipvs);
goto out_unlock;
}
}
if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
IP_VS_SCHEDNAME_MAXLEN) {
ret = -EINVAL;
goto out_unlock;
}
/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
usvc.protocol != IPPROTO_SCTP) {
pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
usvc.protocol, &usvc.addr.ip,
ntohs(usvc.port));
ret = -EFAULT;
goto out_unlock;
}
/* Lookup the exact service by <protocol, addr, port> or fwmark */
rcu_read_lock();
if (usvc.fwmark == 0)
svc = __ip_vs_service_find(ipvs, usvc.af, usvc.protocol,
&usvc.addr, usvc.port);
else
svc = __ip_vs_svc_fwm_find(ipvs, usvc.af, usvc.fwmark);
rcu_read_unlock();
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
ret = -ESRCH;
goto out_unlock;
}
switch (cmd) {
case IP_VS_SO_SET_ADD:
if (svc != NULL)
ret = -EEXIST;
else
ret = ip_vs_add_service(ipvs, &usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
ret = ip_vs_edit_service(svc, &usvc);
break;
case IP_VS_SO_SET_DEL:
ret = ip_vs_del_service(svc);
if (!ret)
goto out_unlock;
break;
case IP_VS_SO_SET_ZERO:
ret = ip_vs_zero_service(svc);
break;
case IP_VS_SO_SET_ADDDEST:
ret = ip_vs_add_dest(svc, &udest);
break;
case IP_VS_SO_SET_EDITDEST:
ret = ip_vs_edit_dest(svc, &udest);
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
break;
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
break;
}
out_unlock:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static void
ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
{
struct ip_vs_scheduler *sched;
struct ip_vs_kstats kstats;
char *sched_name;
sched = rcu_dereference_protected(src->scheduler, 1);
sched_name = sched ? sched->name : "none";
dst->protocol = src->protocol;
dst->addr = src->addr.ip;
dst->port = src->port;
dst->fwmark = src->fwmark;
strscpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
dst->flags = src->flags;
dst->timeout = src->timeout / HZ;
dst->netmask = src->netmask;
dst->num_dests = src->num_dests;
ip_vs_copy_stats(&kstats, &src->stats);
ip_vs_export_stats_user(&dst->stats, &kstats);
}
static inline int
__ip_vs_get_service_entries(struct netns_ipvs *ipvs,
const struct ip_vs_get_services *get,
struct ip_vs_get_services __user *uptr)
{
int idx, count=0;
struct ip_vs_service *svc;
struct ip_vs_service_entry entry;
int ret = 0;
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
/* Only expose IPv4 entries to old interface */
if (svc->af != AF_INET || (svc->ipvs != ipvs))
continue;
if (count >= get->num_services)
goto out;
memset(&entry, 0, sizeof(entry));
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
goto out;
}
count++;
}
}
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
/* Only expose IPv4 entries to old interface */
if (svc->af != AF_INET || (svc->ipvs != ipvs))
continue;
if (count >= get->num_services)
goto out;
memset(&entry, 0, sizeof(entry));
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
goto out;
}
count++;
}
}
out:
return ret;
}
static inline int
__ip_vs_get_dest_entries(struct netns_ipvs *ipvs, const struct ip_vs_get_dests *get,
struct ip_vs_get_dests __user *uptr)
{
struct ip_vs_service *svc;
union nf_inet_addr addr = { .ip = get->addr };
int ret = 0;
rcu_read_lock();
if (get->fwmark)
svc = __ip_vs_svc_fwm_find(ipvs, AF_INET, get->fwmark);
else
svc = __ip_vs_service_find(ipvs, AF_INET, get->protocol, &addr,
get->port);
rcu_read_unlock();
if (svc) {
int count = 0;
struct ip_vs_dest *dest;
struct ip_vs_dest_entry entry;
struct ip_vs_kstats kstats;
memset(&entry, 0, sizeof(entry));
list_for_each_entry(dest, &svc->destinations, n_list) {
if (count >= get->num_dests)
break;
/* Cannot expose heterogeneous members via sockopt
* interface
*/
if (dest->af != svc->af)
continue;
entry.addr = dest->addr.ip;
entry.port = dest->port;
entry.conn_flags = atomic_read(&dest->conn_flags);
entry.weight = atomic_read(&dest->weight);
entry.u_threshold = dest->u_threshold;
entry.l_threshold = dest->l_threshold;
entry.activeconns = atomic_read(&dest->activeconns);
entry.inactconns = atomic_read(&dest->inactconns);
entry.persistconns = atomic_read(&dest->persistconns);
ip_vs_copy_stats(&kstats, &dest->stats);
ip_vs_export_stats_user(&entry.stats, &kstats);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
break;
}
count++;
}
} else
ret = -ESRCH;
return ret;
}
static inline void
__ip_vs_get_timeouts(struct netns_ipvs *ipvs, struct ip_vs_timeout_user *u)
{
#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
struct ip_vs_proto_data *pd;
#endif
memset(u, 0, sizeof (*u));
#ifdef CONFIG_IP_VS_PROTO_TCP
pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
u->udp_timeout =
pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
#endif
}
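/* Timeouts are kept internally in jiffies; the legacy interface exports them
 * in seconds, so reads divide by HZ (above) while writes multiply by HZ in
 * ip_vs_set_timeout().  Worked example (HZ is config-dependent): with HZ=250,
 * a stored TCP ESTABLISHED timeout of 225000 jiffies is reported to userspace
 * as 900 seconds.
 */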
static const unsigned char get_arglen[CMDID(IP_VS_SO_GET_MAX) + 1] = {
[CMDID(IP_VS_SO_GET_VERSION)] = 64,
[CMDID(IP_VS_SO_GET_INFO)] = sizeof(struct ip_vs_getinfo),
[CMDID(IP_VS_SO_GET_SERVICES)] = sizeof(struct ip_vs_get_services),
[CMDID(IP_VS_SO_GET_SERVICE)] = sizeof(struct ip_vs_service_entry),
[CMDID(IP_VS_SO_GET_DESTS)] = sizeof(struct ip_vs_get_dests),
[CMDID(IP_VS_SO_GET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user),
[CMDID(IP_VS_SO_GET_DAEMON)] = 2 * sizeof(struct ip_vs_daemon_user),
};
union ip_vs_get_arglen {
char field_IP_VS_SO_GET_VERSION[64];
struct ip_vs_getinfo field_IP_VS_SO_GET_INFO;
struct ip_vs_get_services field_IP_VS_SO_GET_SERVICES;
struct ip_vs_service_entry field_IP_VS_SO_GET_SERVICE;
struct ip_vs_get_dests field_IP_VS_SO_GET_DESTS;
struct ip_vs_timeout_user field_IP_VS_SO_GET_TIMEOUT;
struct ip_vs_daemon_user field_IP_VS_SO_GET_DAEMON[2];
};
#define MAX_GET_ARGLEN sizeof(union ip_vs_get_arglen)
static int
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[MAX_GET_ARGLEN];
int ret = 0;
unsigned int copylen;
struct net *net = sock_net(sk);
struct netns_ipvs *ipvs = net_ipvs(net);
BUG_ON(!net);
BUILD_BUG_ON(sizeof(arg) > 255);
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
return -EINVAL;
copylen = get_arglen[CMDID(cmd)];
if (*len < (int) copylen) {
IP_VS_DBG(1, "get_ctl: len %d < %u\n", *len, copylen);
return -EINVAL;
}
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
/*
	 * Handle daemons first since they have their own locking
*/
if (cmd == IP_VS_SO_GET_DAEMON) {
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
mutex_lock(&ipvs->sync_mutex);
if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strscpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn,
sizeof(d[0].mcast_ifn));
d[0].syncid = ipvs->mcfg.syncid;
}
if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strscpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn,
sizeof(d[1].mcast_ifn));
d[1].syncid = ipvs->bcfg.syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
mutex_unlock(&ipvs->sync_mutex);
return ret;
}
mutex_lock(&__ip_vs_mutex);
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = ip_vs_conn_tab_size;
info.num_services = ipvs->num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = struct_size(get, entrytable, get->num_services);
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(ipvs, get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
union nf_inet_addr addr;
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
rcu_read_lock();
if (entry->fwmark)
svc = __ip_vs_svc_fwm_find(ipvs, AF_INET, entry->fwmark);
else
svc = __ip_vs_service_find(ipvs, AF_INET,
entry->protocol, &addr,
entry->port);
rcu_read_unlock();
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = struct_size(get, entrytable, get->num_dests);
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(ipvs, get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(ipvs, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static struct nf_sockopt_ops ip_vs_sockopts = {
.pf = PF_INET,
.set_optmin = IP_VS_BASE_CTL,
.set_optmax = IP_VS_SO_SET_MAX+1,
.set = do_ip_vs_set_ctl,
.get_optmin = IP_VS_BASE_CTL,
.get_optmax = IP_VS_SO_GET_MAX+1,
.get = do_ip_vs_get_ctl,
.owner = THIS_MODULE,
};
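/* Minimal userspace sketch (illustrative only, assuming a raw IPv4 socket
 * and CAP_NET_ADMIN) of talking to the legacy sockopt interface registered
 * above:
 *
 *	char buf[64];
 *	socklen_t len = sizeof(buf);
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *	if (getsockopt(fd, IPPROTO_IP, IP_VS_SO_GET_VERSION, buf, &len) == 0)
 *		printf("%s\n", buf);	/* "IP Virtual Server version ..." */
 *
 * do_ip_vs_get_ctl() fills the buffer and updates 'len' as shown above.
 */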
/*
* Generic Netlink interface
*/
/* IPVS genetlink family */
static struct genl_family ip_vs_genl_family;
/* Policy used for first-level command attributes */
static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
[IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 },
[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 },
[IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */
static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
[IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
.len = IP_VS_IFNAME_MAXLEN - 1 },
[IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
[IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_MCAST_GROUP6] = { .len = sizeof(struct in6_addr) },
[IPVS_DAEMON_ATTR_MCAST_PORT] = { .type = NLA_U16 },
[IPVS_DAEMON_ATTR_MCAST_TTL] = { .type = NLA_U8 },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
[IPVS_SVC_ATTR_AF] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY,
.len = sizeof(union nf_inet_addr) },
[IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_SCHEDNAME_MAXLEN - 1 },
[IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_PENAME_MAXLEN },
[IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
.len = sizeof(struct ip_vs_flags) },
[IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */
static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
[IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY,
.len = sizeof(union nf_inet_addr) },
[IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED },
[IPVS_DEST_ATTR_ADDR_FAMILY] = { .type = NLA_U16 },
[IPVS_DEST_ATTR_TUN_TYPE] = { .type = NLA_U8 },
[IPVS_DEST_ATTR_TUN_PORT] = { .type = NLA_U16 },
[IPVS_DEST_ATTR_TUN_FLAGS] = { .type = NLA_U16 },
};
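/* The policies above describe a two-level attribute tree.  A request such as
 * IPVS_CMD_NEW_DEST is expected to nest the service and destination
 * descriptions under the top-level command attributes, roughly:
 *
 *	IPVS_CMD_ATTR_SERVICE (nested)
 *		IPVS_SVC_ATTR_AF, IPVS_SVC_ATTR_PROTOCOL,
 *		IPVS_SVC_ATTR_ADDR, IPVS_SVC_ATTR_PORT (or IPVS_SVC_ATTR_FWMARK)
 *	IPVS_CMD_ATTR_DEST (nested)
 *		IPVS_DEST_ATTR_ADDR, IPVS_DEST_ATTR_PORT,
 *		IPVS_DEST_ATTR_FWD_METHOD, IPVS_DEST_ATTR_WEIGHT, ...
 *
 * ip_vs_genl_parse_service() and ip_vs_genl_parse_dest() below unpack these
 * nests with nla_parse_nested_deprecated().
 */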
static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
struct ip_vs_kstats *kstats)
{
struct nlattr *nl_stats = nla_nest_start_noflag(skb, container_type);
if (!nl_stats)
return -EMSGSIZE;
if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, (u32)kstats->inbps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, (u32)kstats->outbps))
goto nla_put_failure;
nla_nest_end(skb, nl_stats);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_stats);
return -EMSGSIZE;
}
static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type,
struct ip_vs_kstats *kstats)
{
struct nlattr *nl_stats = nla_nest_start_noflag(skb, container_type);
if (!nl_stats)
return -EMSGSIZE;
if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CONNS, kstats->conns,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CPS, kstats->cps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps,
IPVS_STATS_ATTR_PAD))
goto nla_put_failure;
nla_nest_end(skb, nl_stats);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_stats);
return -EMSGSIZE;
}
static int ip_vs_genl_fill_service(struct sk_buff *skb,
struct ip_vs_service *svc)
{
struct ip_vs_scheduler *sched;
struct ip_vs_pe *pe;
struct nlattr *nl_service;
struct ip_vs_flags flags = { .flags = svc->flags,
.mask = ~0 };
struct ip_vs_kstats kstats;
char *sched_name;
nl_service = nla_nest_start_noflag(skb, IPVS_CMD_ATTR_SERVICE);
if (!nl_service)
return -EMSGSIZE;
if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
goto nla_put_failure;
if (svc->fwmark) {
if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
goto nla_put_failure;
} else {
if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
nla_put_be16(skb, IPVS_SVC_ATTR_PORT, svc->port))
goto nla_put_failure;
}
sched = rcu_dereference_protected(svc->scheduler, 1);
sched_name = sched ? sched->name : "none";
pe = rcu_dereference_protected(svc->pe, 1);
if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
(pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
nla_put_be32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
goto nla_put_failure;
ip_vs_copy_stats(&kstats, &svc->stats);
if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &kstats))
goto nla_put_failure;
if (ip_vs_genl_fill_stats64(skb, IPVS_SVC_ATTR_STATS64, &kstats))
goto nla_put_failure;
nla_nest_end(skb, nl_service);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_service);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_service(struct sk_buff *skb,
struct ip_vs_service *svc,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_SERVICE);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_service(skb, svc) < 0)
goto nla_put_failure;
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_services(struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx = 0, i;
int start = cb->args[0];
struct ip_vs_service *svc;
struct net *net = sock_net(skb->sk);
struct netns_ipvs *ipvs = net_ipvs(net);
mutex_lock(&__ip_vs_mutex);
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
hlist_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
if (++idx <= start || (svc->ipvs != ipvs))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
}
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
if (++idx <= start || (svc->ipvs != ipvs))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
}
nla_put_failure:
mutex_unlock(&__ip_vs_mutex);
cb->args[0] = idx;
return skb->len;
}
static bool ip_vs_is_af_valid(int af)
{
if (af == AF_INET)
return true;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6 && ipv6_mod_enabled())
return true;
#endif
return false;
}
static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs,
struct ip_vs_service_user_kern *usvc,
struct nlattr *nla, bool full_entry,
struct ip_vs_service **ret_svc)
{
struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1];
struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr;
struct ip_vs_service *svc;
/* Parse mandatory identifying service fields first */
if (nla == NULL ||
nla_parse_nested_deprecated(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy, NULL))
return -EINVAL;
nla_af = attrs[IPVS_SVC_ATTR_AF];
nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL];
nla_addr = attrs[IPVS_SVC_ATTR_ADDR];
nla_port = attrs[IPVS_SVC_ATTR_PORT];
nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK];
if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
return -EINVAL;
memset(usvc, 0, sizeof(*usvc));
usvc->af = nla_get_u16(nla_af);
if (!ip_vs_is_af_valid(usvc->af))
return -EAFNOSUPPORT;
if (nla_fwmark) {
usvc->protocol = IPPROTO_TCP;
usvc->fwmark = nla_get_u32(nla_fwmark);
} else {
usvc->protocol = nla_get_u16(nla_protocol);
nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr));
usvc->port = nla_get_be16(nla_port);
usvc->fwmark = 0;
}
rcu_read_lock();
if (usvc->fwmark)
svc = __ip_vs_svc_fwm_find(ipvs, usvc->af, usvc->fwmark);
else
svc = __ip_vs_service_find(ipvs, usvc->af, usvc->protocol,
&usvc->addr, usvc->port);
rcu_read_unlock();
*ret_svc = svc;
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
struct nlattr *nla_sched, *nla_flags, *nla_pe, *nla_timeout,
*nla_netmask;
struct ip_vs_flags flags;
nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME];
nla_pe = attrs[IPVS_SVC_ATTR_PE_NAME];
nla_flags = attrs[IPVS_SVC_ATTR_FLAGS];
nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT];
nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK];
if (!(nla_sched && nla_flags && nla_timeout && nla_netmask))
return -EINVAL;
nla_memcpy(&flags, nla_flags, sizeof(flags));
/* prefill flags from service if it already exists */
if (svc)
usvc->flags = svc->flags;
/* set new flags from userland */
usvc->flags = (usvc->flags & ~flags.mask) |
(flags.flags & flags.mask);
usvc->sched_name = nla_data(nla_sched);
usvc->pe_name = nla_pe ? nla_data(nla_pe) : NULL;
usvc->timeout = nla_get_u32(nla_timeout);
usvc->netmask = nla_get_be32(nla_netmask);
}
return 0;
}
static struct ip_vs_service *ip_vs_genl_find_service(struct netns_ipvs *ipvs,
struct nlattr *nla)
{
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
int ret;
ret = ip_vs_genl_parse_service(ipvs, &usvc, nla, false, &svc);
return ret ? ERR_PTR(ret) : svc;
}
static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
{
struct nlattr *nl_dest;
struct ip_vs_kstats kstats;
nl_dest = nla_nest_start_noflag(skb, IPVS_CMD_ATTR_DEST);
if (!nl_dest)
return -EMSGSIZE;
if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
(atomic_read(&dest->conn_flags) &
IP_VS_CONN_F_FWD_MASK)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
atomic_read(&dest->weight)) ||
nla_put_u8(skb, IPVS_DEST_ATTR_TUN_TYPE,
dest->tun_type) ||
nla_put_be16(skb, IPVS_DEST_ATTR_TUN_PORT,
dest->tun_port) ||
nla_put_u16(skb, IPVS_DEST_ATTR_TUN_FLAGS,
dest->tun_flags) ||
nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
atomic_read(&dest->activeconns)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
atomic_read(&dest->inactconns)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
atomic_read(&dest->persistconns)) ||
nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af))
goto nla_put_failure;
ip_vs_copy_stats(&kstats, &dest->stats);
if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &kstats))
goto nla_put_failure;
if (ip_vs_genl_fill_stats64(skb, IPVS_DEST_ATTR_STATS64, &kstats))
goto nla_put_failure;
nla_nest_end(skb, nl_dest);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_dest);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_DEST);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_dest(skb, dest) < 0)
goto nla_put_failure;
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_dests(struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx = 0;
int start = cb->args[0];
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
struct net *net = sock_net(skb->sk);
struct netns_ipvs *ipvs = net_ipvs(net);
mutex_lock(&__ip_vs_mutex);
/* Try to find the service for which to dump destinations */
if (nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN, attrs, IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy, cb->extack))
goto out_err;
svc = ip_vs_genl_find_service(ipvs, attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR_OR_NULL(svc))
goto out_err;
/* Dump the destinations */
list_for_each_entry(dest, &svc->destinations, n_list) {
if (++idx <= start)
continue;
if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
nla_put_failure:
cb->args[0] = idx;
out_err:
mutex_unlock(&__ip_vs_mutex);
return skb->len;
}
static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
struct nlattr *nla, bool full_entry)
{
struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
struct nlattr *nla_addr, *nla_port;
struct nlattr *nla_addr_family;
/* Parse mandatory identifying destination fields first */
if (nla == NULL ||
nla_parse_nested_deprecated(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy, NULL))
return -EINVAL;
nla_addr = attrs[IPVS_DEST_ATTR_ADDR];
nla_port = attrs[IPVS_DEST_ATTR_PORT];
nla_addr_family = attrs[IPVS_DEST_ATTR_ADDR_FAMILY];
if (!(nla_addr && nla_port))
return -EINVAL;
memset(udest, 0, sizeof(*udest));
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_be16(nla_port);
if (nla_addr_family)
udest->af = nla_get_u16(nla_addr_family);
else
udest->af = 0;
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
*nla_l_thresh, *nla_tun_type, *nla_tun_port,
*nla_tun_flags;
nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD];
nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT];
nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH];
nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH];
nla_tun_type = attrs[IPVS_DEST_ATTR_TUN_TYPE];
nla_tun_port = attrs[IPVS_DEST_ATTR_TUN_PORT];
nla_tun_flags = attrs[IPVS_DEST_ATTR_TUN_FLAGS];
if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
return -EINVAL;
udest->conn_flags = nla_get_u32(nla_fwd)
& IP_VS_CONN_F_FWD_MASK;
udest->weight = nla_get_u32(nla_weight);
udest->u_threshold = nla_get_u32(nla_u_thresh);
udest->l_threshold = nla_get_u32(nla_l_thresh);
if (nla_tun_type)
udest->tun_type = nla_get_u8(nla_tun_type);
if (nla_tun_port)
udest->tun_port = nla_get_be16(nla_tun_port);
if (nla_tun_flags)
udest->tun_flags = nla_get_u16(nla_tun_flags);
}
return 0;
}
static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __u32 state,
struct ipvs_sync_daemon_cfg *c)
{
struct nlattr *nl_daemon;
nl_daemon = nla_nest_start_noflag(skb, IPVS_CMD_ATTR_DAEMON);
if (!nl_daemon)
return -EMSGSIZE;
if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, c->mcast_ifn) ||
nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, c->syncid) ||
nla_put_u16(skb, IPVS_DAEMON_ATTR_SYNC_MAXLEN, c->sync_maxlen) ||
nla_put_u16(skb, IPVS_DAEMON_ATTR_MCAST_PORT, c->mcast_port) ||
nla_put_u8(skb, IPVS_DAEMON_ATTR_MCAST_TTL, c->mcast_ttl))
goto nla_put_failure;
#ifdef CONFIG_IP_VS_IPV6
if (c->mcast_af == AF_INET6) {
if (nla_put_in6_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP6,
&c->mcast_group.in6))
goto nla_put_failure;
} else
#endif
if (c->mcast_af == AF_INET &&
nla_put_in_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP,
c->mcast_group.ip))
goto nla_put_failure;
nla_nest_end(skb, nl_daemon);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_daemon);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
struct ipvs_sync_daemon_cfg *c,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_DAEMON);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_daemon(skb, state, c))
goto nla_put_failure;
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct netns_ipvs *ipvs = net_ipvs(net);
mutex_lock(&ipvs->sync_mutex);
if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
&ipvs->mcfg, cb) < 0)
goto nla_put_failure;
cb->args[0] = 1;
}
if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
&ipvs->bcfg, cb) < 0)
goto nla_put_failure;
cb->args[1] = 1;
}
nla_put_failure:
mutex_unlock(&ipvs->sync_mutex);
return skb->len;
}
static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
{
struct ipvs_sync_daemon_cfg c;
struct nlattr *a;
int ret;
memset(&c, 0, sizeof(c));
if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
strscpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
sizeof(c.mcast_ifn));
c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]);
a = attrs[IPVS_DAEMON_ATTR_SYNC_MAXLEN];
if (a)
c.sync_maxlen = nla_get_u16(a);
a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP];
if (a) {
c.mcast_af = AF_INET;
c.mcast_group.ip = nla_get_in_addr(a);
if (!ipv4_is_multicast(c.mcast_group.ip))
return -EINVAL;
} else {
a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP6];
if (a) {
#ifdef CONFIG_IP_VS_IPV6
int addr_type;
c.mcast_af = AF_INET6;
c.mcast_group.in6 = nla_get_in6_addr(a);
addr_type = ipv6_addr_type(&c.mcast_group.in6);
if (!(addr_type & IPV6_ADDR_MULTICAST))
return -EINVAL;
#else
return -EAFNOSUPPORT;
#endif
}
}
a = attrs[IPVS_DAEMON_ATTR_MCAST_PORT];
if (a)
c.mcast_port = nla_get_u16(a);
a = attrs[IPVS_DAEMON_ATTR_MCAST_TTL];
if (a)
c.mcast_ttl = nla_get_u8(a);
/* The synchronization protocol is incompatible with mixed family
* services
*/
if (ipvs->mixed_address_family_dests > 0)
return -EINVAL;
ret = start_sync_thread(ipvs, &c,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
return ret;
}
static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
{
int ret;
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
ret = stop_sync_thread(ipvs,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
return ret;
}
static int ip_vs_genl_set_config(struct netns_ipvs *ipvs, struct nlattr **attrs)
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(ipvs, &t);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN])
t.tcp_fin_timeout =
nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
return ip_vs_set_timeout(ipvs, &t);
}
static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
{
int ret = -EINVAL, cmd;
struct net *net = sock_net(skb->sk);
struct netns_ipvs *ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
nla_parse_nested_deprecated(daemon_attrs, IPVS_DAEMON_ATTR_MAX, info->attrs[IPVS_CMD_ATTR_DAEMON], ip_vs_daemon_policy, info->extack))
goto out;
if (cmd == IPVS_CMD_NEW_DAEMON)
ret = ip_vs_genl_new_daemon(ipvs, daemon_attrs);
else
ret = ip_vs_genl_del_daemon(ipvs, daemon_attrs);
}
out:
return ret;
}
static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
{
bool need_full_svc = false, need_full_dest = false;
struct ip_vs_service *svc = NULL;
struct ip_vs_service_user_kern usvc;
struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
struct net *net = sock_net(skb->sk);
struct netns_ipvs *ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
mutex_lock(&__ip_vs_mutex);
if (cmd == IPVS_CMD_FLUSH) {
ret = ip_vs_flush(ipvs, false);
goto out;
} else if (cmd == IPVS_CMD_SET_CONFIG) {
ret = ip_vs_genl_set_config(ipvs, info->attrs);
goto out;
} else if (cmd == IPVS_CMD_ZERO &&
!info->attrs[IPVS_CMD_ATTR_SERVICE]) {
ret = ip_vs_zero_all(ipvs);
goto out;
}
/* All following commands require a service argument, so check if we
* received a valid one. We need a full service specification when
* adding / editing a service. Only identifying members otherwise. */
if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
need_full_svc = true;
ret = ip_vs_genl_parse_service(ipvs, &usvc,
info->attrs[IPVS_CMD_ATTR_SERVICE],
need_full_svc, &svc);
if (ret)
goto out;
/* Unless we're adding a new service, the service must already exist */
if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) {
ret = -ESRCH;
goto out;
}
/* Destination commands require a valid destination argument. For
* adding / editing a destination, we need a full destination
* specification. */
if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST ||
cmd == IPVS_CMD_DEL_DEST) {
if (cmd != IPVS_CMD_DEL_DEST)
need_full_dest = true;
ret = ip_vs_genl_parse_dest(&udest,
info->attrs[IPVS_CMD_ATTR_DEST],
need_full_dest);
if (ret)
goto out;
/* Old protocols did not allow the user to specify address
* family, so we set it to zero instead. We also didn't
* allow heterogeneous pools in the old code, so it's safe
* to assume that this will have the same address family as
* the service.
*/
if (udest.af == 0)
udest.af = svc->af;
if (!ip_vs_is_af_valid(udest.af)) {
ret = -EAFNOSUPPORT;
goto out;
}
if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
/* The synchronization protocol is incompatible
* with mixed family services
*/
if (ipvs->sync_state) {
ret = -EINVAL;
goto out;
}
/* Which connection types do we support? */
switch (udest.conn_flags) {
case IP_VS_CONN_F_TUNNEL:
/* We are able to forward this */
break;
default:
ret = -EINVAL;
goto out;
}
}
}
switch (cmd) {
case IPVS_CMD_NEW_SERVICE:
if (svc == NULL)
ret = ip_vs_add_service(ipvs, &usvc, &svc);
else
ret = -EEXIST;
break;
case IPVS_CMD_SET_SERVICE:
ret = ip_vs_edit_service(svc, &usvc);
break;
case IPVS_CMD_DEL_SERVICE:
ret = ip_vs_del_service(svc);
/* do not use svc, it can be freed */
break;
case IPVS_CMD_NEW_DEST:
ret = ip_vs_add_dest(svc, &udest);
break;
case IPVS_CMD_SET_DEST:
ret = ip_vs_edit_dest(svc, &udest);
break;
case IPVS_CMD_DEL_DEST:
ret = ip_vs_del_dest(svc, &udest);
break;
case IPVS_CMD_ZERO:
ret = ip_vs_zero_service(svc);
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
void *reply;
int ret, cmd, reply_cmd;
struct net *net = sock_net(skb->sk);
struct netns_ipvs *ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_GET_SERVICE)
reply_cmd = IPVS_CMD_NEW_SERVICE;
else if (cmd == IPVS_CMD_GET_INFO)
reply_cmd = IPVS_CMD_SET_INFO;
else if (cmd == IPVS_CMD_GET_CONFIG)
reply_cmd = IPVS_CMD_SET_CONFIG;
else {
pr_err("unknown Generic Netlink command\n");
return -EINVAL;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
mutex_lock(&__ip_vs_mutex);
reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd);
if (reply == NULL)
goto nla_put_failure;
switch (cmd) {
case IPVS_CMD_GET_SERVICE:
{
struct ip_vs_service *svc;
svc = ip_vs_genl_find_service(ipvs,
info->attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc)) {
ret = PTR_ERR(svc);
goto out_err;
} else if (svc) {
ret = ip_vs_genl_fill_service(msg, svc);
if (ret)
goto nla_put_failure;
} else {
ret = -ESRCH;
goto out_err;
}
break;
}
case IPVS_CMD_GET_CONFIG:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(ipvs, &t);
#ifdef CONFIG_IP_VS_PROTO_TCP
if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
t.tcp_timeout) ||
nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
t.tcp_fin_timeout))
goto nla_put_failure;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
goto nla_put_failure;
#endif
break;
}
case IPVS_CMD_GET_INFO:
if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
IP_VS_VERSION_CODE) ||
nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
ip_vs_conn_tab_size))
goto nla_put_failure;
break;
}
genlmsg_end(msg, reply);
ret = genlmsg_reply(msg, info);
goto out;
nla_put_failure:
pr_err("not enough space in Netlink message\n");
ret = -EMSGSIZE;
out_err:
nlmsg_free(msg);
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static const struct genl_small_ops ip_vs_genl_ops[] = {
{
.cmd = IPVS_CMD_NEW_SERVICE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_SET_SERVICE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_DEL_SERVICE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_SERVICE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
.dumpit = ip_vs_genl_dump_services,
},
{
.cmd = IPVS_CMD_NEW_DEST,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_SET_DEST,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_DEL_DEST,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_DEST,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.dumpit = ip_vs_genl_dump_dests,
},
{
.cmd = IPVS_CMD_NEW_DAEMON,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_DEL_DAEMON,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_GET_DAEMON,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.dumpit = ip_vs_genl_dump_daemons,
},
{
.cmd = IPVS_CMD_SET_CONFIG,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_CONFIG,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
},
{
.cmd = IPVS_CMD_GET_INFO,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
},
{
.cmd = IPVS_CMD_ZERO,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_FLUSH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
};
static struct genl_family ip_vs_genl_family __ro_after_init = {
.hdrsize = 0,
.name = IPVS_GENL_NAME,
.version = IPVS_GENL_VERSION,
.maxattr = IPVS_CMD_ATTR_MAX,
.policy = ip_vs_cmd_policy,
	.netnsok	= true,         /* Make ipvsadm work with netns */
.module = THIS_MODULE,
.small_ops = ip_vs_genl_ops,
.n_small_ops = ARRAY_SIZE(ip_vs_genl_ops),
.resv_start_op = IPVS_CMD_FLUSH + 1,
};
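/* Illustrative userspace sketch, assuming a libnl-3/libnl-genl-3 client (the
 * calls shown are libnl APIs, not part of this file), of resolving the "IPVS"
 * family registered above and dumping all services, which is served by
 * ip_vs_genl_dump_services():
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, IPVS_GENL_NAME);
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0,
 *		    NLM_F_DUMP, IPVS_CMD_GET_SERVICE, IPVS_GENL_VERSION);
 *	nl_send_auto(sk, msg);
 *	// each multipart reply carries a nested IPVS_CMD_ATTR_SERVICE
 */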
static int __init ip_vs_genl_register(void)
{
return genl_register_family(&ip_vs_genl_family);
}
static void ip_vs_genl_unregister(void)
{
genl_unregister_family(&ip_vs_genl_family);
}
/* End of Generic Netlink interface definitions */
/*
 * per netns init/exit func.
*/
#ifdef CONFIG_SYSCTL
static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
{
struct net *net = ipvs->net;
struct ctl_table *tbl;
int idx, ret;
size_t ctl_table_size = ARRAY_SIZE(vs_vars);
atomic_set(&ipvs->dropentry, 0);
spin_lock_init(&ipvs->dropentry_lock);
spin_lock_init(&ipvs->droppacket_lock);
spin_lock_init(&ipvs->securetcp_lock);
INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
INIT_DELAYED_WORK(&ipvs->expire_nodest_conn_work,
expire_nodest_conn_handler);
ipvs->est_stopped = 0;
if (!net_eq(net, &init_net)) {
tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
tbl[0].procname = NULL;
ctl_table_size = 0;
}
} else
tbl = vs_vars;
/* Initialize sysctl defaults */
for (idx = 0; idx < ARRAY_SIZE(vs_vars); idx++) {
if (tbl[idx].proc_handler == proc_do_defense_mode)
tbl[idx].extra2 = ipvs;
}
idx = 0;
ipvs->sysctl_amemthresh = 1024;
tbl[idx++].data = &ipvs->sysctl_amemthresh;
ipvs->sysctl_am_droprate = 10;
tbl[idx++].data = &ipvs->sysctl_am_droprate;
tbl[idx++].data = &ipvs->sysctl_drop_entry;
tbl[idx++].data = &ipvs->sysctl_drop_packet;
#ifdef CONFIG_IP_VS_NFCT
tbl[idx++].data = &ipvs->sysctl_conntrack;
#endif
tbl[idx++].data = &ipvs->sysctl_secure_tcp;
ipvs->sysctl_snat_reroute = 1;
tbl[idx++].data = &ipvs->sysctl_snat_reroute;
ipvs->sysctl_sync_ver = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ver;
ipvs->sysctl_sync_ports = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ports;
tbl[idx++].data = &ipvs->sysctl_sync_persist_mode;
ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32;
tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
ipvs->sysctl_sync_sock_size = 0;
tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
tbl[idx++].data = &ipvs->sysctl_cache_bypass;
tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
tbl[idx++].data = &ipvs->sysctl_sloppy_tcp;
tbl[idx++].data = &ipvs->sysctl_sloppy_sctp;
tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
tbl[idx].data = &ipvs->sysctl_sync_threshold;
tbl[idx].extra2 = ipvs;
tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
tbl[idx++].data = &ipvs->sysctl_sync_retries;
tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
ipvs->sysctl_pmtu_disc = 1;
tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
tbl[idx++].data = &ipvs->sysctl_backup_only;
ipvs->sysctl_conn_reuse_mode = 1;
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
ipvs->sysctl_run_estimation = 1;
tbl[idx].extra2 = ipvs;
tbl[idx++].data = &ipvs->sysctl_run_estimation;
ipvs->est_cpulist_valid = 0;
tbl[idx].extra2 = ipvs;
tbl[idx++].data = &ipvs->sysctl_est_cpulist;
ipvs->sysctl_est_nice = IPVS_EST_NICE;
tbl[idx].extra2 = ipvs;
tbl[idx++].data = &ipvs->sysctl_est_nice;
#ifdef CONFIG_IP_VS_DEBUG
	/* Global sysctls must be read-only in non-init netns */
if (!net_eq(net, &init_net))
tbl[idx++].mode = 0444;
#endif
ret = -ENOMEM;
ipvs->sysctl_hdr = register_net_sysctl_sz(net, "net/ipv4/vs", tbl,
ctl_table_size);
if (!ipvs->sysctl_hdr)
goto err;
ipvs->sysctl_tbl = tbl;
ret = ip_vs_start_estimator(ipvs, &ipvs->tot_stats->s);
if (ret < 0)
goto err;
/* Schedule defense work */
queue_delayed_work(system_long_wq, &ipvs->defense_work,
DEFENSE_TIMER_PERIOD);
return 0;
err:
unregister_net_sysctl_table(ipvs->sysctl_hdr);
if (!net_eq(net, &init_net))
kfree(tbl);
return ret;
}
static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
{
struct net *net = ipvs->net;
cancel_delayed_work_sync(&ipvs->expire_nodest_conn_work);
cancel_delayed_work_sync(&ipvs->defense_work);
cancel_work_sync(&ipvs->defense_work.work);
unregister_net_sysctl_table(ipvs->sysctl_hdr);
ip_vs_stop_estimator(ipvs, &ipvs->tot_stats->s);
if (ipvs->est_cpulist_valid)
free_cpumask_var(ipvs->sysctl_est_cpulist);
if (!net_eq(net, &init_net))
kfree(ipvs->sysctl_tbl);
}
#else
static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) { return 0; }
static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs) { }
#endif
static struct notifier_block ip_vs_dst_notifier = {
.notifier_call = ip_vs_dst_event,
#ifdef CONFIG_IP_VS_IPV6
.priority = ADDRCONF_NOTIFY_PRIORITY + 5,
#endif
};
int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
{
int ret = -ENOMEM;
int idx;
/* Initialize rs_table */
for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
INIT_HLIST_HEAD(&ipvs->rs_table[idx]);
INIT_LIST_HEAD(&ipvs->dest_trash);
spin_lock_init(&ipvs->dest_trash_lock);
timer_setup(&ipvs->dest_trash_timer, ip_vs_dest_trash_expire, 0);
atomic_set(&ipvs->ftpsvc_counter, 0);
atomic_set(&ipvs->nullsvc_counter, 0);
atomic_set(&ipvs->conn_out_counter, 0);
INIT_DELAYED_WORK(&ipvs->est_reload_work, est_reload_work_handler);
/* procfs stats */
ipvs->tot_stats = kzalloc(sizeof(*ipvs->tot_stats), GFP_KERNEL);
if (!ipvs->tot_stats)
goto out;
if (ip_vs_stats_init_alloc(&ipvs->tot_stats->s) < 0)
goto err_tot_stats;
#ifdef CONFIG_PROC_FS
if (!proc_create_net("ip_vs", 0, ipvs->net->proc_net,
&ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)))
goto err_vs;
if (!proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
ip_vs_stats_show, NULL))
goto err_stats;
if (!proc_create_net_single("ip_vs_stats_percpu", 0,
ipvs->net->proc_net,
ip_vs_stats_percpu_show, NULL))
goto err_percpu;
#endif
ret = ip_vs_control_net_init_sysctl(ipvs);
if (ret < 0)
goto err;
return 0;
err:
#ifdef CONFIG_PROC_FS
remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
err_percpu:
remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
err_stats:
remove_proc_entry("ip_vs", ipvs->net->proc_net);
err_vs:
#endif
ip_vs_stats_release(&ipvs->tot_stats->s);
err_tot_stats:
kfree(ipvs->tot_stats);
out:
return ret;
}
void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
{
ip_vs_trash_cleanup(ipvs);
ip_vs_control_net_cleanup_sysctl(ipvs);
cancel_delayed_work_sync(&ipvs->est_reload_work);
#ifdef CONFIG_PROC_FS
remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
remove_proc_entry("ip_vs", ipvs->net->proc_net);
#endif
call_rcu(&ipvs->tot_stats->rcu_head, ip_vs_stats_rcu_free);
}
int __init ip_vs_register_nl_ioctl(void)
{
int ret;
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
goto err_sock;
}
ret = ip_vs_genl_register();
if (ret) {
pr_err("cannot register Generic Netlink interface.\n");
goto err_genl;
}
return 0;
err_genl:
nf_unregister_sockopt(&ip_vs_sockopts);
err_sock:
return ret;
}
void ip_vs_unregister_nl_ioctl(void)
{
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
}
int __init ip_vs_control_init(void)
{
int idx;
int ret;
/* Initialize svc_table, ip_vs_svc_fwm_table */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
INIT_HLIST_HEAD(&ip_vs_svc_table[idx]);
INIT_HLIST_HEAD(&ip_vs_svc_fwm_table[idx]);
}
	smp_wmb();	/* Do we really need it now? */
ret = register_netdevice_notifier(&ip_vs_dst_notifier);
if (ret < 0)
return ret;
return 0;
}
void ip_vs_control_cleanup(void)
{
unregister_netdevice_notifier(&ip_vs_dst_notifier);
/* relying on common rcu_barrier() in ip_vs_cleanup() */
}
| linux-master | net/netfilter/ipvs/ip_vs_ctl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_ftp.c: IPVS ftp application module
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes:
*
* Most code here is taken from ip_masq_ftp.c in kernel 2.2. The difference
 * is that the ip_vs_ftp module handles the reverse direction to ip_masq_ftp.
*
* IP_MASQ_FTP ftp masquerading module
*
* Version: @(#)ip_masq_ftp.c 0.04 02/05/96
*
* Author: Wouter Gadeyne
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <asm/unaligned.h>
#include <net/ip_vs.h>
#define SERVER_STRING_PASV "227 "
#define CLIENT_STRING_PORT "PORT"
#define SERVER_STRING_EPSV "229 "
#define CLIENT_STRING_EPRT "EPRT"
enum {
IP_VS_FTP_ACTIVE = 0,
IP_VS_FTP_PORT = 0,
IP_VS_FTP_PASV,
IP_VS_FTP_EPRT,
IP_VS_FTP_EPSV,
};
/*
* List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
* First port is set to the default port.
*/
static unsigned int ports_count = 1;
static unsigned short ports[IP_VS_APP_MAX_PORTS] = {21, 0};
module_param_array(ports, ushort, &ports_count, 0444);
MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands");
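/* Usage sketch (illustrative): additional control ports can be passed as a
 * module parameter when loading the helper, e.g.
 *
 *	modprobe ip_vs_ftp ports=21,2121
 *
 * Each non-zero entry gets its own application incarnation registered in
 * __ip_vs_ftp_init() below.
 */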
static char *ip_vs_ftp_data_ptr(struct sk_buff *skb, struct ip_vs_iphdr *ipvsh)
{
struct tcphdr *th = (struct tcphdr *)((char *)skb->data + ipvsh->len);
if ((th->doff << 2) < sizeof(struct tcphdr))
return NULL;
return (char *)th + (th->doff << 2);
}
static int
ip_vs_ftp_init_conn(struct ip_vs_app *app, struct ip_vs_conn *cp)
{
/* We use connection tracking for the command connection */
cp->flags |= IP_VS_CONN_F_NFCT;
return 0;
}
static int
ip_vs_ftp_done_conn(struct ip_vs_app *app, struct ip_vs_conn *cp)
{
return 0;
}
/* Get <addr,port> from the string "xxx.xxx.xxx.xxx,ppp,ppp", starting
 * with the "pattern". <addr,port> is in network order.
 * Parse the extended format depending on 'ext'; in that case 'addr' can
 * be pre-set.
*/
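/* Example inputs this parser accepts, as used by the callers below
 * (illustrative addresses and ports):
 *
 *	"PORT 192,168,0,10,4,1"                          -> 192.168.0.10, port 1025
 *	"227 Entering Passive Mode (10,0,0,2,31,64)"     -> 10.0.0.2, port 8000
 *	"229 Entering Extended Passive Mode (|||6446|)"  -> port 6446 only
 *	"EPRT |2|2001:db8::1|6446|"                      -> 2001:db8::1, port 6446
 *	                                                    (IPv6 control connection)
 */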
static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
const char *pattern, size_t plen,
char skip, bool ext, int mode,
union nf_inet_addr *addr, __be16 *port,
__u16 af, char **start, char **end)
{
char *s, c;
unsigned char p[6];
char edelim;
__u16 hport;
int i = 0;
if (data_limit - data < plen) {
/* check if there is partial match */
if (strncasecmp(data, pattern, data_limit - data) == 0)
return -1;
else
return 0;
}
if (strncasecmp(data, pattern, plen) != 0) {
return 0;
}
s = data + plen;
if (skip) {
bool found = false;
for (;; s++) {
if (s == data_limit)
return -1;
if (!found) {
/* "(" is optional for non-extended format,
				 * so catch the start of the IPv4 address
*/
if (!ext && isdigit(*s))
break;
if (*s == skip)
found = true;
} else if (*s != skip) {
break;
}
}
}
/* Old IPv4-only format? */
if (!ext) {
p[0] = 0;
for (data = s; ; data++) {
if (data == data_limit)
return -1;
c = *data;
if (isdigit(c)) {
p[i] = p[i]*10 + c - '0';
} else if (c == ',' && i < 5) {
i++;
p[i] = 0;
} else {
/* unexpected character or terminator */
break;
}
}
if (i != 5)
return -1;
*start = s;
*end = data;
addr->ip = get_unaligned((__be32 *) p);
*port = get_unaligned((__be16 *) (p + 4));
return 1;
}
if (s == data_limit)
return -1;
*start = s;
edelim = *s++;
if (edelim < 33 || edelim > 126)
return -1;
if (s == data_limit)
return -1;
if (*s == edelim) {
/* Address family is usually missing for EPSV response */
if (mode != IP_VS_FTP_EPSV)
return -1;
s++;
if (s == data_limit)
return -1;
/* Then address should be missing too */
if (*s != edelim)
return -1;
/* Caller can pre-set addr, if needed */
s++;
} else {
const char *ep;
/* We allow address only from same family */
if (af == AF_INET6 && *s != '2')
return -1;
if (af == AF_INET && *s != '1')
return -1;
s++;
if (s == data_limit)
return -1;
if (*s != edelim)
return -1;
s++;
if (s == data_limit)
return -1;
if (af == AF_INET6) {
if (in6_pton(s, data_limit - s, (u8 *)addr, edelim,
&ep) <= 0)
return -1;
} else {
if (in4_pton(s, data_limit - s, (u8 *)addr, edelim,
&ep) <= 0)
return -1;
}
s = (char *) ep;
if (s == data_limit)
return -1;
if (*s != edelim)
return -1;
s++;
}
for (hport = 0; ; s++)
{
if (s == data_limit)
return -1;
if (!isdigit(*s))
break;
hport = hport * 10 + *s - '0';
}
if (s == data_limit || !hport || *s != edelim)
return -1;
s++;
*end = s;
*port = htons(hport);
return 1;
}
/* Look at outgoing ftp packets to catch the response to a PASV/EPSV command
* from the server (inside-to-outside).
* When we see one, we build a connection entry with the client address,
* client port 0 (unknown at the moment), the server address and the
* server port. Mark the current connection entry as a control channel
 * of the new entry. All this work is just so that the data connection
 * can be scheduled to the right server later.
*
* The outgoing packet should be something like
* "227 Entering Passive Mode (xxx,xxx,xxx,xxx,ppp,ppp)".
* xxx,xxx,xxx,xxx is the server address, ppp,ppp is the server port number.
 * The extended format for the EPSV response usually provides only the port:
* "229 Entering Extended Passive Mode (|||ppp|)"
*/
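/* Worked example (illustrative addresses): if the real server answers
 * "227 Entering Passive Mode (192,168,1,10,31,64)" and the virtual service
 * address is 10.0.0.1, the payload is mangled below so the client instead
 * sees the VIP and the port of the new connection entry, e.g.
 * "227 Entering Passive Mode (10,0,0,1,31,64)", and the expected data
 * connection is wired up via ip_vs_nfct_expect_related().
 */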
static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct sk_buff *skb, int *diff,
struct ip_vs_iphdr *ipvsh)
{
char *data, *data_limit;
char *start, *end;
union nf_inet_addr from;
__be16 port;
struct ip_vs_conn *n_cp;
char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
unsigned int buf_len;
int ret = 0;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
*diff = 0;
/* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1;
/* Linear packets are much easier to deal with. */
if (skb_ensure_writable(skb, skb->len))
return 0;
if (cp->app_data == (void *) IP_VS_FTP_PASV) {
data = ip_vs_ftp_data_ptr(skb, ipvsh);
data_limit = skb_tail_pointer(skb);
if (!data || data >= data_limit)
return 1;
if (ip_vs_ftp_get_addrport(data, data_limit,
SERVER_STRING_PASV,
sizeof(SERVER_STRING_PASV)-1,
'(', false, IP_VS_FTP_PASV,
&from, &port, cp->af,
&start, &end) != 1)
return 1;
IP_VS_DBG(7, "PASV response (%pI4:%u) -> %pI4:%u detected\n",
&from.ip, ntohs(port), &cp->caddr.ip, 0);
} else if (cp->app_data == (void *) IP_VS_FTP_EPSV) {
data = ip_vs_ftp_data_ptr(skb, ipvsh);
data_limit = skb_tail_pointer(skb);
if (!data || data >= data_limit)
return 1;
		/* Usually, the data address is not specified but
		 * we support a different address, so pre-set it.
*/
from = cp->daddr;
if (ip_vs_ftp_get_addrport(data, data_limit,
SERVER_STRING_EPSV,
sizeof(SERVER_STRING_EPSV)-1,
'(', true, IP_VS_FTP_EPSV,
&from, &port, cp->af,
&start, &end) != 1)
return 1;
IP_VS_DBG_BUF(7, "EPSV response (%s:%u) -> %s:%u detected\n",
IP_VS_DBG_ADDR(cp->af, &from), ntohs(port),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), 0);
} else {
return 1;
}
/* Now update or create a connection entry for it */
{
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(cp->ipvs, cp->af,
ipvsh->protocol, &from, port,
&cp->caddr, 0, &p);
n_cp = ip_vs_conn_out_get(&p);
}
if (!n_cp) {
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(cp->ipvs,
cp->af, ipvsh->protocol, &cp->caddr,
0, &cp->vaddr, port, &p);
n_cp = ip_vs_conn_new(&p, cp->af, &from, port,
IP_VS_CONN_F_NO_CPORT |
IP_VS_CONN_F_NFCT,
cp->dest, skb->mark);
if (!n_cp)
return 0;
/* add its controller */
ip_vs_control_add(n_cp, cp);
}
/* Replace the old passive address with the new one */
if (cp->app_data == (void *) IP_VS_FTP_PASV) {
from.ip = n_cp->vaddr.ip;
port = n_cp->vport;
snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u",
((unsigned char *)&from.ip)[0],
((unsigned char *)&from.ip)[1],
((unsigned char *)&from.ip)[2],
((unsigned char *)&from.ip)[3],
ntohs(port) >> 8,
ntohs(port) & 0xFF);
} else if (cp->app_data == (void *) IP_VS_FTP_EPSV) {
from = n_cp->vaddr;
port = n_cp->vport;
		/* Only the port; the client will use the VIP for the data connection */
snprintf(buf, sizeof(buf), "|||%u|",
ntohs(port));
} else {
*buf = 0;
}
buf_len = strlen(buf);
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
bool mangled;
		/* If mangling fails, this function will return 0,
		 * which will cause the packet to be dropped.
		 * Mangling can only fail under memory pressure;
* hopefully it will succeed on the retransmitted
* packet.
*/
mangled = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
ipvsh->len,
start - data,
end - start,
buf, buf_len);
if (mangled) {
ip_vs_nfct_expect_related(skb, ct, n_cp,
ipvsh->protocol, 0, 0);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* csum is updated */
ret = 1;
}
}
/* Not setting 'diff' is intentional, otherwise the sequence
* would be adjusted twice.
*/
cp->app_data = (void *) IP_VS_FTP_ACTIVE;
ip_vs_tcp_conn_listen(n_cp);
ip_vs_conn_put(n_cp);
return ret;
}
/* Look at incoming ftp packets to catch the PASV/PORT/EPRT/EPSV command
* (outside-to-inside).
*
* The incoming packet having the PORT command should be something like
* "PORT xxx,xxx,xxx,xxx,ppp,ppp\n".
* xxx,xxx,xxx,xxx is the client address, ppp,ppp is the client port number.
* In this case, we create a connection entry using the client address and
* port, so that the active ftp data connection from the server can reach
* the client.
* Extended format:
* "EPSV\r\n" when client requests server address from same family
* "EPSV 1\r\n" when client requests IPv4 server address
* "EPSV 2\r\n" when client requests IPv6 server address
* "EPSV ALL\r\n" - not supported
* EPRT with specified delimiter (ASCII 33..126), "|" by default:
* "EPRT |1|IPv4ADDR|PORT|\r\n" when client provides IPv4 addrport
* "EPRT |2|IPv6ADDR|PORT|\r\n" when client provides IPv6 addrport
*/
static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct sk_buff *skb, int *diff,
struct ip_vs_iphdr *ipvsh)
{
char *data, *data_start, *data_limit;
char *start, *end;
union nf_inet_addr to;
__be16 port;
struct ip_vs_conn *n_cp;
/* no diff required for incoming packets */
*diff = 0;
/* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1;
/* Linear packets are much easier to deal with. */
if (skb_ensure_writable(skb, skb->len))
return 0;
data = data_start = ip_vs_ftp_data_ptr(skb, ipvsh);
data_limit = skb_tail_pointer(skb);
if (!data || data >= data_limit)
return 1;
while (data <= data_limit - 6) {
if (cp->af == AF_INET &&
strncasecmp(data, "PASV\r\n", 6) == 0) {
/* Passive mode on */
IP_VS_DBG(7, "got PASV at %td of %td\n",
data - data_start,
data_limit - data_start);
cp->app_data = (void *) IP_VS_FTP_PASV;
return 1;
}
/* EPSV or EPSV<space><net-prt> */
if (strncasecmp(data, "EPSV", 4) == 0 &&
(data[4] == ' ' || data[4] == '\r')) {
if (data[4] == ' ') {
char proto = data[5];
if (data > data_limit - 7 || data[6] != '\r')
return 1;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && proto == '2') {
} else
#endif
if (cp->af == AF_INET && proto == '1') {
} else {
return 1;
}
}
/* Extended Passive mode on */
IP_VS_DBG(7, "got EPSV at %td of %td\n",
data - data_start,
data_limit - data_start);
cp->app_data = (void *) IP_VS_FTP_EPSV;
return 1;
}
data++;
}
/*
 * To support a virtual FTP server, the scenario is as follows:
 * FTP client ----> Load Balancer ----> FTP server
 * First detect the port number in the application data,
 * then create a new connection entry for the incoming data
 * connection.
 */
if (cp->af == AF_INET &&
ip_vs_ftp_get_addrport(data_start, data_limit,
CLIENT_STRING_PORT,
sizeof(CLIENT_STRING_PORT)-1,
' ', false, IP_VS_FTP_PORT,
&to, &port, cp->af,
&start, &end) == 1) {
IP_VS_DBG(7, "PORT %pI4:%u detected\n", &to.ip, ntohs(port));
/* Now update or create a connection entry for it */
IP_VS_DBG(7, "protocol %s %pI4:%u %pI4:%u\n",
ip_vs_proto_name(ipvsh->protocol),
&to.ip, ntohs(port), &cp->vaddr.ip,
ntohs(cp->vport)-1);
} else if (ip_vs_ftp_get_addrport(data_start, data_limit,
CLIENT_STRING_EPRT,
sizeof(CLIENT_STRING_EPRT)-1,
' ', true, IP_VS_FTP_EPRT,
&to, &port, cp->af,
&start, &end) == 1) {
IP_VS_DBG_BUF(7, "EPRT %s:%u detected\n",
IP_VS_DBG_ADDR(cp->af, &to), ntohs(port));
/* Now update or create a connection entry for it */
IP_VS_DBG_BUF(7, "protocol %s %s:%u %s:%u\n",
ip_vs_proto_name(ipvsh->protocol),
IP_VS_DBG_ADDR(cp->af, &to), ntohs(port),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
ntohs(cp->vport)-1);
} else {
return 1;
}
/* Passive mode off */
cp->app_data = (void *) IP_VS_FTP_ACTIVE;
{
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(cp->ipvs, cp->af,
ipvsh->protocol, &to, port, &cp->vaddr,
htons(ntohs(cp->vport)-1), &p);
n_cp = ip_vs_conn_in_get(&p);
if (!n_cp) {
n_cp = ip_vs_conn_new(&p, cp->af, &cp->daddr,
htons(ntohs(cp->dport)-1),
IP_VS_CONN_F_NFCT, cp->dest,
skb->mark);
if (!n_cp)
return 0;
/* add its controller */
ip_vs_control_add(n_cp, cp);
}
}
/*
 * Move the expected data connection to listen state
 */
ip_vs_tcp_conn_listen(n_cp);
ip_vs_conn_put(n_cp);
return 1;
}
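/* The FTP application helper: ip_vs_ftp_out() and ip_vs_ftp_in() are
 * called for the outgoing and incoming packets of TCP connections bound
 * to this helper.
 */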
static struct ip_vs_app ip_vs_ftp = {
.name = "ftp",
.type = IP_VS_APP_TYPE_FTP,
.protocol = IPPROTO_TCP,
.module = THIS_MODULE,
.incs_list = LIST_HEAD_INIT(ip_vs_ftp.incs_list),
.init_conn = ip_vs_ftp_init_conn,
.done_conn = ip_vs_ftp_done_conn,
.bind_conn = NULL,
.unbind_conn = NULL,
.pkt_out = ip_vs_ftp_out,
.pkt_in = ip_vs_ftp_in,
};
/*
* per netns ip_vs_ftp initialization
*/
static int __net_init __ip_vs_ftp_init(struct net *net)
{
int i, ret;
struct ip_vs_app *app;
struct netns_ipvs *ipvs = net_ipvs(net);
if (!ipvs)
return -ENOENT;
app = register_ip_vs_app(ipvs, &ip_vs_ftp);
if (IS_ERR(app))
return PTR_ERR(app);
for (i = 0; i < ports_count; i++) {
if (!ports[i])
continue;
ret = register_ip_vs_app_inc(ipvs, app, app->protocol, ports[i]);
if (ret)
goto err_unreg;
}
return 0;
err_unreg:
unregister_ip_vs_app(ipvs, &ip_vs_ftp);
return ret;
}
/*
* netns exit
*/
static void __ip_vs_ftp_exit(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
if (!ipvs)
return;
unregister_ip_vs_app(ipvs, &ip_vs_ftp);
}
static struct pernet_operations ip_vs_ftp_ops = {
.init = __ip_vs_ftp_init,
.exit = __ip_vs_ftp_exit,
};
static int __init ip_vs_ftp_init(void)
{
/* rcu_barrier() is called by netns on error */
return register_pernet_subsys(&ip_vs_ftp_ops);
}
/*
* ip_vs_ftp finish.
*/
static void __exit ip_vs_ftp_exit(void)
{
unregister_pernet_subsys(&ip_vs_ftp_ops);
/* rcu_barrier() is called by netns */
}
module_init(ip_vs_ftp_init);
module_exit(ip_vs_ftp_exit);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/ipvs/ip_vs_ftp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_proto.c: transport protocol load balancing support for IPVS
*
* Authors: Wensong Zhang <[email protected]>
* Julian Anastasov <[email protected]>
*
* Changes:
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <net/ip_vs.h>
/*
* IPVS protocols can only be registered/unregistered when the ipvs
* module is loaded/unloaded, so no lock is needed in accessing the
* ipvs protocol table.
*/
#define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
#define IP_VS_PROTO_HASH(proto) ((proto) & (IP_VS_PROTO_TAB_SIZE-1))
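/* Example: IPPROTO_TCP (6) hashes to slot 6, IPPROTO_ESP (50) to slot 18. */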
static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
/* States for conn templates: NONE or words separated with ",", max 15 chars */
static const char *ip_vs_ctpl_state_name_table[IP_VS_CTPL_S_LAST] = {
[IP_VS_CTPL_S_NONE] = "NONE",
[IP_VS_CTPL_S_ASSURED] = "ASSURED",
};
/*
* register an ipvs protocol
*/
static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
{
unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
pp->next = ip_vs_proto_table[hash];
ip_vs_proto_table[hash] = pp;
if (pp->init != NULL)
pp->init(pp);
return 0;
}
/*
* register an ipvs protocols netns related data
*/
static int
register_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_protocol *pp)
{
unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
struct ip_vs_proto_data *pd =
kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL);
if (!pd)
return -ENOMEM;
pd->pp = pp; /* cached to avoid a protocol table lookup */
pd->next = ipvs->proto_data_table[hash];
ipvs->proto_data_table[hash] = pd;
atomic_set(&pd->appcnt, 0); /* Init app counter */
if (pp->init_netns != NULL) {
int ret = pp->init_netns(ipvs, pd);
if (ret) {
/* unlink and free the proto data */
ipvs->proto_data_table[hash] = pd->next;
kfree(pd);
return ret;
}
}
return 0;
}
/*
* unregister an ipvs protocol
*/
static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
{
struct ip_vs_protocol **pp_p;
unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
pp_p = &ip_vs_proto_table[hash];
for (; *pp_p; pp_p = &(*pp_p)->next) {
if (*pp_p == pp) {
*pp_p = pp->next;
if (pp->exit != NULL)
pp->exit(pp);
return 0;
}
}
return -ESRCH;
}
/*
* unregister an ipvs protocols netns data
*/
static int
unregister_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
struct ip_vs_proto_data **pd_p;
unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
pd_p = &ipvs->proto_data_table[hash];
for (; *pd_p; pd_p = &(*pd_p)->next) {
if (*pd_p == pd) {
*pd_p = pd->next;
if (pd->pp->exit_netns != NULL)
pd->pp->exit_netns(ipvs, pd);
kfree(pd);
return 0;
}
}
return -ESRCH;
}
/*
* get ip_vs_protocol object by its proto.
*/
struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
{
struct ip_vs_protocol *pp;
unsigned int hash = IP_VS_PROTO_HASH(proto);
for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) {
if (pp->protocol == proto)
return pp;
}
return NULL;
}
EXPORT_SYMBOL(ip_vs_proto_get);
/*
* get ip_vs_protocol object data by netns and proto
*/
struct ip_vs_proto_data *
ip_vs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
{
struct ip_vs_proto_data *pd;
unsigned int hash = IP_VS_PROTO_HASH(proto);
for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
if (pd->pp->protocol == proto)
return pd;
}
return NULL;
}
EXPORT_SYMBOL(ip_vs_proto_data_get);
/*
* Propagate event for state change to all protocols
*/
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
{
struct ip_vs_proto_data *pd;
int i;
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {
if (pd->pp->timeout_change)
pd->pp->timeout_change(pd, flags);
}
}
}
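/*
 * Duplicate a protocol's default timeout table (size in bytes) so that
 * the caller gets a private, modifiable copy.
 */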
int *
ip_vs_create_timeout_table(int *table, int size)
{
return kmemdup(table, size, GFP_KERNEL);
}
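/*
 * Return a human-readable state name for a connection: template
 * connections use the table above, other connections ask their
 * protocol's state_name() handler.
 */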
const char *ip_vs_state_name(const struct ip_vs_conn *cp)
{
unsigned int state = cp->state;
struct ip_vs_protocol *pp;
if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
if (state >= IP_VS_CTPL_S_LAST)
return "ERR!";
return ip_vs_ctpl_state_name_table[state] ? : "?";
}
pp = ip_vs_proto_get(cp->protocol);
if (pp == NULL || pp->state_name == NULL)
return (cp->protocol == IPPROTO_IP) ? "NONE" : "ERR!";
return pp->state_name(state);
}
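/*
 * Format an IPv4 TCP/UDP packet as "saddr:sport->daddr:dport" (or a
 * TRUNCATED/fragment note) for debug output.
 */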
static void
ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
const struct sk_buff *skb,
int offset,
const char *msg)
{
char buf[128];
struct iphdr _iph, *ih;
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
sprintf(buf, "TRUNCATED");
else if (ih->frag_off & htons(IP_OFFSET))
sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr);
else {
__be16 _ports[2], *pptr;
pptr = skb_header_pointer(skb, offset + ih->ihl*4,
sizeof(_ports), _ports);
if (pptr == NULL)
sprintf(buf, "TRUNCATED %pI4->%pI4",
&ih->saddr, &ih->daddr);
else
sprintf(buf, "%pI4:%u->%pI4:%u",
&ih->saddr, ntohs(pptr[0]),
&ih->daddr, ntohs(pptr[1]));
}
pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#ifdef CONFIG_IP_VS_IPV6
static void
ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
const struct sk_buff *skb,
int offset,
const char *msg)
{
char buf[192];
struct ipv6hdr _iph, *ih;
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
sprintf(buf, "TRUNCATED");
else if (ih->nexthdr == IPPROTO_FRAGMENT)
sprintf(buf, "%pI6c->%pI6c frag", &ih->saddr, &ih->daddr);
else {
__be16 _ports[2], *pptr;
pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
sizeof(_ports), _ports);
if (pptr == NULL)
sprintf(buf, "TRUNCATED %pI6c->%pI6c",
&ih->saddr, &ih->daddr);
else
sprintf(buf, "%pI6c:%u->%pI6c:%u",
&ih->saddr, ntohs(pptr[0]),
&ih->daddr, ntohs(pptr[1]));
}
pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#endif
void
ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
const struct sk_buff *skb,
int offset,
const char *msg)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);
else
#endif
ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
}
/*
* per network name-space init
*/
int __net_init ip_vs_protocol_net_init(struct netns_ipvs *ipvs)
{
int i, ret;
static struct ip_vs_protocol *protos[] = {
#ifdef CONFIG_IP_VS_PROTO_TCP
&ip_vs_protocol_tcp,
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
&ip_vs_protocol_udp,
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
&ip_vs_protocol_sctp,
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
&ip_vs_protocol_ah,
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
&ip_vs_protocol_esp,
#endif
};
for (i = 0; i < ARRAY_SIZE(protos); i++) {
ret = register_ip_vs_proto_netns(ipvs, protos[i]);
if (ret < 0)
goto cleanup;
}
return 0;
cleanup:
ip_vs_protocol_net_cleanup(ipvs);
return ret;
}
void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs)
{
struct ip_vs_proto_data *pd;
int i;
/* unregister all the ipvs proto data for this netns */
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
while ((pd = ipvs->proto_data_table[i]) != NULL)
unregister_ip_vs_proto_netns(ipvs, pd);
}
}
int __init ip_vs_protocol_init(void)
{
char protocols[64];
#define REGISTER_PROTOCOL(p) \
do { \
register_ip_vs_protocol(p); \
strcat(protocols, ", "); \
strcat(protocols, (p)->name); \
} while (0)
protocols[0] = '\0';
protocols[2] = '\0';
#ifdef CONFIG_IP_VS_PROTO_TCP
REGISTER_PROTOCOL(&ip_vs_protocol_tcp);
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
REGISTER_PROTOCOL(&ip_vs_protocol_udp);
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
REGISTER_PROTOCOL(&ip_vs_protocol_sctp);
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
REGISTER_PROTOCOL(&ip_vs_protocol_ah);
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
pr_info("Registered protocols (%s)\n", &protocols[2]);
return 0;
}
void ip_vs_protocol_cleanup(void)
{
struct ip_vs_protocol *pp;
int i;
/* unregister all the ipvs protocols */
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
while ((pp = ip_vs_proto_table[i]) != NULL)
unregister_ip_vs_protocol(pp);
}
}
| linux-master | net/netfilter/ipvs/ip_vs_proto.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/key/af_key.c An implementation of PF_KEYv2 sockets.
*
* Authors: Maxim Giryaev <[email protected]>
* David S. Miller <[email protected]>
* Alexey Kuznetsov <[email protected]>
* Kunihiro Ishiguro <[email protected]>
* Kazunori MIYAZAWA / USAGI Project <[email protected]>
* Derek Atkins <[email protected]>
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/xfrm.h>
#include <net/sock.h>
#define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x))
#define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x))
static unsigned int pfkey_net_id __read_mostly;
struct netns_pfkey {
/* List of all pfkey sockets. */
struct hlist_head table;
atomic_t socks_nr;
};
static DEFINE_MUTEX(pfkey_mutex);
#define DUMMY_MARK 0
static const struct xfrm_mark dummy_mark = {0, 0};
struct pfkey_sock {
/* struct sock must be the first member of struct pfkey_sock */
struct sock sk;
int registered;
int promisc;
struct {
uint8_t msg_version;
uint32_t msg_portid;
int (*dump)(struct pfkey_sock *sk);
void (*done)(struct pfkey_sock *sk);
union {
struct xfrm_policy_walk policy;
struct xfrm_state_walk state;
} u;
struct sk_buff *skb;
} dump;
struct mutex dump_lock;
};
static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
xfrm_address_t *saddr, xfrm_address_t *daddr,
u16 *family);
static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
{
return (struct pfkey_sock *)sk;
}
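/* A dump may only proceed while the socket's receive queue is below
 * two thirds of its receive buffer.
 */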
static int pfkey_can_dump(const struct sock *sk)
{
if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
return 1;
return 0;
}
static void pfkey_terminate_dump(struct pfkey_sock *pfk)
{
if (pfk->dump.dump) {
if (pfk->dump.skb) {
kfree_skb(pfk->dump.skb);
pfk->dump.skb = NULL;
}
pfk->dump.done(pfk);
pfk->dump.dump = NULL;
pfk->dump.done = NULL;
}
}
static void pfkey_sock_destruct(struct sock *sk)
{
struct net *net = sock_net(sk);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
pfkey_terminate_dump(pfkey_sk(sk));
skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive pfkey socket: %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
atomic_dec(&net_pfkey->socks_nr);
}
static const struct proto_ops pfkey_ops;
static void pfkey_insert(struct sock *sk)
{
struct net *net = sock_net(sk);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
mutex_lock(&pfkey_mutex);
sk_add_node_rcu(sk, &net_pfkey->table);
mutex_unlock(&pfkey_mutex);
}
static void pfkey_remove(struct sock *sk)
{
mutex_lock(&pfkey_mutex);
sk_del_node_init_rcu(sk);
mutex_unlock(&pfkey_mutex);
}
static struct proto key_proto = {
.name = "KEY",
.owner = THIS_MODULE,
.obj_size = sizeof(struct pfkey_sock),
};
static int pfkey_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
struct pfkey_sock *pfk;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
if (protocol != PF_KEY_V2)
return -EPROTONOSUPPORT;
sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern);
if (sk == NULL)
return -ENOMEM;
pfk = pfkey_sk(sk);
mutex_init(&pfk->dump_lock);
sock->ops = &pfkey_ops;
sock_init_data(sock, sk);
sk->sk_family = PF_KEY;
sk->sk_destruct = pfkey_sock_destruct;
atomic_inc(&net_pfkey->socks_nr);
pfkey_insert(sk);
return 0;
}
static int pfkey_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (!sk)
return 0;
pfkey_remove(sk);
sock_orphan(sk);
sock->sk = NULL;
skb_queue_purge(&sk->sk_write_queue);
synchronize_rcu();
sock_put(sk);
return 0;
}
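/* Clone the message and queue it on one socket's receive queue;
 * returns -ENOBUFS if the receive buffer is already full or the clone
 * fails.
 */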
static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
struct sock *sk)
{
int err = -ENOBUFS;
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
return err;
skb = skb_clone(skb, allocation);
if (skb) {
skb_set_owner_r(skb, sk);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
err = 0;
}
return err;
}
/* Send SKB to all pfkey sockets matching selected criteria. */
#define BROADCAST_ALL 0
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
int broadcast_flags, struct sock *one_sk,
struct net *net)
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
int err = -ESRCH;
/* XXX Do we need something like netlink_overrun? I think
* XXX PF_KEY socket apps will not mind current behavior.
*/
if (!skb)
return -ENOMEM;
rcu_read_lock();
sk_for_each_rcu(sk, &net_pfkey->table) {
struct pfkey_sock *pfk = pfkey_sk(sk);
int err2;
/* Yes, this means that a promiscuous socket that is also an
 * intended recipient of this pfkey message receives it twice.
 */
if (pfk->promisc)
pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
/* the exact target will be processed later */
if (sk == one_sk)
continue;
if (broadcast_flags != BROADCAST_ALL) {
if (broadcast_flags & BROADCAST_PROMISC_ONLY)
continue;
if ((broadcast_flags & BROADCAST_REGISTERED) &&
!pfk->registered)
continue;
if (broadcast_flags & BROADCAST_ONE)
continue;
}
err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
/* Error is cleared after successful sending to at least one
* registered KM */
if ((broadcast_flags & BROADCAST_REGISTERED) && err)
err = err2;
}
rcu_read_unlock();
if (one_sk != NULL)
err = pfkey_broadcast_one(skb, allocation, one_sk);
kfree_skb(skb);
return err;
}
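/* Run one round of an in-progress dump. The last queued message is
 * delivered to the requesting socket with sadb_msg_seq set to 0 to
 * mark the end of the dump, after which the dump state is released.
 */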
static int pfkey_do_dump(struct pfkey_sock *pfk)
{
struct sadb_msg *hdr;
int rc;
mutex_lock(&pfk->dump_lock);
if (!pfk->dump.dump) {
rc = 0;
goto out;
}
rc = pfk->dump.dump(pfk);
if (rc == -ENOBUFS) {
rc = 0;
goto out;
}
if (pfk->dump.skb) {
if (!pfkey_can_dump(&pfk->sk)) {
rc = 0;
goto out;
}
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc;
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL;
}
pfkey_terminate_dump(pfk);
out:
mutex_unlock(&pfk->dump_lock);
return rc;
}
static inline void pfkey_hdr_dup(struct sadb_msg *new,
const struct sadb_msg *orig)
{
*new = *orig;
}
static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
{
struct sk_buff *skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_KERNEL);
struct sadb_msg *hdr;
if (!skb)
return -ENOBUFS;
/* Woe be to the platform trying to support PFKEY yet
* having normal errnos outside the 1-255 range, inclusive.
*/
err = -err;
if (err == ERESTARTSYS ||
err == ERESTARTNOHAND ||
err == ERESTARTNOINTR)
err = EINTR;
if (err >= 512)
err = EINVAL;
BUG_ON(err <= 0 || err >= 256);
hdr = skb_put(skb, sizeof(struct sadb_msg));
pfkey_hdr_dup(hdr, orig);
hdr->sadb_msg_errno = (uint8_t) err;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
sizeof(uint64_t));
pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
static const u8 sadb_ext_min_len[] = {
[SADB_EXT_RESERVED] = (u8) 0,
[SADB_EXT_SA] = (u8) sizeof(struct sadb_sa),
[SADB_EXT_LIFETIME_CURRENT] = (u8) sizeof(struct sadb_lifetime),
[SADB_EXT_LIFETIME_HARD] = (u8) sizeof(struct sadb_lifetime),
[SADB_EXT_LIFETIME_SOFT] = (u8) sizeof(struct sadb_lifetime),
[SADB_EXT_ADDRESS_SRC] = (u8) sizeof(struct sadb_address),
[SADB_EXT_ADDRESS_DST] = (u8) sizeof(struct sadb_address),
[SADB_EXT_ADDRESS_PROXY] = (u8) sizeof(struct sadb_address),
[SADB_EXT_KEY_AUTH] = (u8) sizeof(struct sadb_key),
[SADB_EXT_KEY_ENCRYPT] = (u8) sizeof(struct sadb_key),
[SADB_EXT_IDENTITY_SRC] = (u8) sizeof(struct sadb_ident),
[SADB_EXT_IDENTITY_DST] = (u8) sizeof(struct sadb_ident),
[SADB_EXT_SENSITIVITY] = (u8) sizeof(struct sadb_sens),
[SADB_EXT_PROPOSAL] = (u8) sizeof(struct sadb_prop),
[SADB_EXT_SUPPORTED_AUTH] = (u8) sizeof(struct sadb_supported),
[SADB_EXT_SUPPORTED_ENCRYPT] = (u8) sizeof(struct sadb_supported),
[SADB_EXT_SPIRANGE] = (u8) sizeof(struct sadb_spirange),
[SADB_X_EXT_KMPRIVATE] = (u8) sizeof(struct sadb_x_kmprivate),
[SADB_X_EXT_POLICY] = (u8) sizeof(struct sadb_x_policy),
[SADB_X_EXT_SA2] = (u8) sizeof(struct sadb_x_sa2),
[SADB_X_EXT_NAT_T_TYPE] = (u8) sizeof(struct sadb_x_nat_t_type),
[SADB_X_EXT_NAT_T_SPORT] = (u8) sizeof(struct sadb_x_nat_t_port),
[SADB_X_EXT_NAT_T_DPORT] = (u8) sizeof(struct sadb_x_nat_t_port),
[SADB_X_EXT_NAT_T_OA] = (u8) sizeof(struct sadb_address),
[SADB_X_EXT_SEC_CTX] = (u8) sizeof(struct sadb_x_sec_ctx),
[SADB_X_EXT_KMADDRESS] = (u8) sizeof(struct sadb_x_kmaddress),
[SADB_X_EXT_FILTER] = (u8) sizeof(struct sadb_x_filter),
};
/* Verify sadb_address_{len,prefixlen} against sa_family. */
static int verify_address_len(const void *p)
{
const struct sadb_address *sp = p;
const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
const struct sockaddr_in *sin;
#if IS_ENABLED(CONFIG_IPV6)
const struct sockaddr_in6 *sin6;
#endif
int len;
if (sp->sadb_address_len <
DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
sizeof(uint64_t)))
return -EINVAL;
switch (addr->sa_family) {
case AF_INET:
len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
if (sp->sadb_address_len != len ||
sp->sadb_address_prefixlen > 32)
return -EINVAL;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t));
if (sp->sadb_address_len != len ||
sp->sadb_address_prefixlen > 128)
return -EINVAL;
break;
#endif
default:
/* The user is employing the kernel to keep track of security
 * associations for another protocol, such as
 * OSPF/RSVP/RIPV2/MIP. It is the user's job to verify the
 * lengths.
 *
 * XXX Actually, the association/policy database is not yet
 * XXX able to cope with arbitrary sockaddr families.
 * XXX When it can, remove this -EINVAL. -DaveM
 */
return -EINVAL;
}
return 0;
}
static inline int sadb_key_len(const struct sadb_key *key)
{
int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
sizeof(uint64_t));
}
static int verify_key_len(const void *p)
{
const struct sadb_key *key = p;
if (sadb_key_len(key) > key->sadb_key_len)
return -EINVAL;
return 0;
}
static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
{
return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
sec_ctx->sadb_x_ctx_len,
sizeof(uint64_t));
}
static inline int verify_sec_ctx_len(const void *p)
{
const struct sadb_x_sec_ctx *sec_ctx = p;
int len = sec_ctx->sadb_x_ctx_len;
if (len > PAGE_SIZE)
return -EINVAL;
len = pfkey_sec_ctx_len(sec_ctx);
if (sec_ctx->sadb_x_sec_len != len)
return -EINVAL;
return 0;
}
static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx,
gfp_t gfp)
{
struct xfrm_user_sec_ctx *uctx = NULL;
int ctx_size = sec_ctx->sadb_x_ctx_len;
uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp);
if (!uctx)
return NULL;
uctx->len = pfkey_sec_ctx_len(sec_ctx);
uctx->exttype = sec_ctx->sadb_x_sec_exttype;
uctx->ctx_doi = sec_ctx->sadb_x_ctx_doi;
uctx->ctx_alg = sec_ctx->sadb_x_ctx_alg;
uctx->ctx_len = sec_ctx->sadb_x_ctx_len;
memcpy(uctx + 1, sec_ctx + 1,
uctx->ctx_len);
return uctx;
}
static int present_and_same_family(const struct sadb_address *src,
const struct sadb_address *dst)
{
const struct sockaddr *s_addr, *d_addr;
if (!src || !dst)
return 0;
s_addr = (const struct sockaddr *)(src + 1);
d_addr = (const struct sockaddr *)(dst + 1);
if (s_addr->sa_family != d_addr->sa_family)
return 0;
if (s_addr->sa_family != AF_INET
#if IS_ENABLED(CONFIG_IPV6)
&& s_addr->sa_family != AF_INET6
#endif
)
return 0;
return 1;
}
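/* Walk the extension headers following the base sadb_msg, validating
 * each extension's length (and, for address/key/sec_ctx extensions,
 * its contents) and recording a pointer to it in ext_hdrs[type - 1].
 * Duplicate and reserved extension types are rejected.
 */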
static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void **ext_hdrs)
{
const char *p = (char *) hdr;
int len = skb->len;
len -= sizeof(*hdr);
p += sizeof(*hdr);
while (len > 0) {
const struct sadb_ext *ehdr = (const struct sadb_ext *) p;
uint16_t ext_type;
int ext_len;
if (len < sizeof(*ehdr))
return -EINVAL;
ext_len = ehdr->sadb_ext_len;
ext_len *= sizeof(uint64_t);
ext_type = ehdr->sadb_ext_type;
if (ext_len < sizeof(uint64_t) ||
ext_len > len ||
ext_type == SADB_EXT_RESERVED)
return -EINVAL;
if (ext_type <= SADB_EXT_MAX) {
int min = (int) sadb_ext_min_len[ext_type];
if (ext_len < min)
return -EINVAL;
if (ext_hdrs[ext_type-1] != NULL)
return -EINVAL;
switch (ext_type) {
case SADB_EXT_ADDRESS_SRC:
case SADB_EXT_ADDRESS_DST:
case SADB_EXT_ADDRESS_PROXY:
case SADB_X_EXT_NAT_T_OA:
if (verify_address_len(p))
return -EINVAL;
break;
case SADB_X_EXT_SEC_CTX:
if (verify_sec_ctx_len(p))
return -EINVAL;
break;
case SADB_EXT_KEY_AUTH:
case SADB_EXT_KEY_ENCRYPT:
if (verify_key_len(p))
return -EINVAL;
break;
default:
break;
}
ext_hdrs[ext_type-1] = (void *) p;
}
p += ext_len;
len -= ext_len;
}
return 0;
}
static uint16_t
pfkey_satype2proto(uint8_t satype)
{
switch (satype) {
case SADB_SATYPE_UNSPEC:
return IPSEC_PROTO_ANY;
case SADB_SATYPE_AH:
return IPPROTO_AH;
case SADB_SATYPE_ESP:
return IPPROTO_ESP;
case SADB_X_SATYPE_IPCOMP:
return IPPROTO_COMP;
default:
return 0;
}
/* NOTREACHED */
}
static uint8_t
pfkey_proto2satype(uint16_t proto)
{
switch (proto) {
case IPPROTO_AH:
return SADB_SATYPE_AH;
case IPPROTO_ESP:
return SADB_SATYPE_ESP;
case IPPROTO_COMP:
return SADB_X_SATYPE_IPCOMP;
default:
return 0;
}
/* NOTREACHED */
}
/* BTW, this scheme means that there is no way with PFKEY2 sockets to
* say specifically 'just raw sockets' as we encode them as 255.
*/
static uint8_t pfkey_proto_to_xfrm(uint8_t proto)
{
return proto == IPSEC_PROTO_ANY ? 0 : proto;
}
static uint8_t pfkey_proto_from_xfrm(uint8_t proto)
{
return proto ? proto : IPSEC_PROTO_ANY;
}
static inline int pfkey_sockaddr_len(sa_family_t family)
{
switch (family) {
case AF_INET:
return sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return sizeof(struct sockaddr_in6);
#endif
}
return 0;
}
static
int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
{
switch (sa->sa_family) {
case AF_INET:
xaddr->a4 =
((struct sockaddr_in *)sa)->sin_addr.s_addr;
return AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
memcpy(xaddr->a6,
&((struct sockaddr_in6 *)sa)->sin6_addr,
sizeof(struct in6_addr));
return AF_INET6;
#endif
}
return 0;
}
static
int pfkey_sadb_addr2xfrm_addr(const struct sadb_address *addr, xfrm_address_t *xaddr)
{
return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1),
xaddr);
}
static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
const struct sadb_sa *sa;
const struct sadb_address *addr;
uint16_t proto;
unsigned short family;
xfrm_address_t *xaddr;
sa = ext_hdrs[SADB_EXT_SA - 1];
if (sa == NULL)
return NULL;
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
if (proto == 0)
return NULL;
/* sadb_address_len should be checked by caller */
addr = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
if (addr == NULL)
return NULL;
family = ((const struct sockaddr *)(addr + 1))->sa_family;
switch (family) {
case AF_INET:
xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
break;
#endif
default:
xaddr = NULL;
}
if (!xaddr)
return NULL;
return xfrm_state_lookup(net, DUMMY_MARK, xaddr, sa->sadb_sa_spi, proto, family);
}
#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
static int
pfkey_sockaddr_size(sa_family_t family)
{
return PFKEY_ALIGN8(pfkey_sockaddr_len(family));
}
static inline int pfkey_mode_from_xfrm(int mode)
{
switch(mode) {
case XFRM_MODE_TRANSPORT:
return IPSEC_MODE_TRANSPORT;
case XFRM_MODE_TUNNEL:
return IPSEC_MODE_TUNNEL;
case XFRM_MODE_BEET:
return IPSEC_MODE_BEET;
default:
return -1;
}
}
static inline int pfkey_mode_to_xfrm(int mode)
{
switch(mode) {
case IPSEC_MODE_ANY: /*XXX*/
case IPSEC_MODE_TRANSPORT:
return XFRM_MODE_TRANSPORT;
case IPSEC_MODE_TUNNEL:
return XFRM_MODE_TUNNEL;
case IPSEC_MODE_BEET:
return XFRM_MODE_BEET;
default:
return -1;
}
}
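/* Fill a sockaddr_in/sockaddr_in6 from an xfrm address and port and
 * return the full prefix length of the family (32 or 128), or 0 for an
 * unsupported family.
 */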
static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port,
struct sockaddr *sa,
unsigned short family)
{
switch (family) {
case AF_INET:
{
struct sockaddr_in *sin = (struct sockaddr_in *)sa;
sin->sin_family = AF_INET;
sin->sin_port = port;
sin->sin_addr.s_addr = xaddr->a4;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
return 32;
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
{
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = port;
sin6->sin6_flowinfo = 0;
sin6->sin6_addr = xaddr->in6;
sin6->sin6_scope_id = 0;
return 128;
}
#endif
}
return 0;
}
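/* Build an SADB message describing the xfrm_state x: base header, SA,
 * current lifetime and addresses are always included; key material is
 * added when add_keys is set, the soft/hard lifetimes according to hsc
 * (bit 0 = soft, bit 1 = hard), and SA2, NAT-T and security context
 * extensions as applicable.
 */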
static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
int add_keys, int hsc)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
struct sadb_sa *sa;
struct sadb_lifetime *lifetime;
struct sadb_address *addr;
struct sadb_key *key;
struct sadb_x_sa2 *sa2;
struct sadb_x_sec_ctx *sec_ctx;
struct xfrm_sec_ctx *xfrm_ctx;
int ctx_size = 0;
int size;
int auth_key_size = 0;
int encrypt_key_size = 0;
int sockaddr_size;
struct xfrm_encap_tmpl *natt = NULL;
int mode;
/* address family check */
sockaddr_size = pfkey_sockaddr_size(x->props.family);
if (!sockaddr_size)
return ERR_PTR(-EINVAL);
/* base, SA, (lifetime (HSC),) address(SD), (address(P),)
key(AE), (identity(SD),) (sensitivity)> */
size = sizeof(struct sadb_msg) +sizeof(struct sadb_sa) +
sizeof(struct sadb_lifetime) +
((hsc & 1) ? sizeof(struct sadb_lifetime) : 0) +
((hsc & 2) ? sizeof(struct sadb_lifetime) : 0) +
sizeof(struct sadb_address)*2 +
sockaddr_size*2 +
sizeof(struct sadb_x_sa2);
if ((xfrm_ctx = x->security)) {
ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
}
/* identity & sensitivity */
if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr, x->props.family))
size += sizeof(struct sadb_address) + sockaddr_size;
if (add_keys) {
if (x->aalg && x->aalg->alg_key_len) {
auth_key_size =
PFKEY_ALIGN8((x->aalg->alg_key_len + 7) / 8);
size += sizeof(struct sadb_key) + auth_key_size;
}
if (x->ealg && x->ealg->alg_key_len) {
encrypt_key_size =
PFKEY_ALIGN8((x->ealg->alg_key_len+7) / 8);
size += sizeof(struct sadb_key) + encrypt_key_size;
}
}
if (x->encap)
natt = x->encap;
if (natt && natt->encap_type) {
size += sizeof(struct sadb_x_nat_t_type);
size += sizeof(struct sadb_x_nat_t_port);
size += sizeof(struct sadb_x_nat_t_port);
}
skb = alloc_skb(size + 16, GFP_ATOMIC);
if (skb == NULL)
return ERR_PTR(-ENOBUFS);
/* caller should fill in the header later */
hdr = skb_put(skb, sizeof(struct sadb_msg));
memset(hdr, 0, size); /* XXX do we need this ? */
hdr->sadb_msg_len = size / sizeof(uint64_t);
/* sa */
sa = skb_put(skb, sizeof(struct sadb_sa));
sa->sadb_sa_len = sizeof(struct sadb_sa)/sizeof(uint64_t);
sa->sadb_sa_exttype = SADB_EXT_SA;
sa->sadb_sa_spi = x->id.spi;
sa->sadb_sa_replay = x->props.replay_window;
switch (x->km.state) {
case XFRM_STATE_VALID:
sa->sadb_sa_state = x->km.dying ?
SADB_SASTATE_DYING : SADB_SASTATE_MATURE;
break;
case XFRM_STATE_ACQ:
sa->sadb_sa_state = SADB_SASTATE_LARVAL;
break;
default:
sa->sadb_sa_state = SADB_SASTATE_DEAD;
break;
}
sa->sadb_sa_auth = 0;
if (x->aalg) {
struct xfrm_algo_desc *a = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
sa->sadb_sa_auth = (a && a->pfkey_supported) ?
a->desc.sadb_alg_id : 0;
}
sa->sadb_sa_encrypt = 0;
BUG_ON(x->ealg && x->calg);
if (x->ealg) {
struct xfrm_algo_desc *a = xfrm_ealg_get_byname(x->ealg->alg_name, 0);
sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
a->desc.sadb_alg_id : 0;
}
/* KAME compatible: sadb_sa_encrypt is overloaded with calg id */
if (x->calg) {
struct xfrm_algo_desc *a = xfrm_calg_get_byname(x->calg->alg_name, 0);
sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
a->desc.sadb_alg_id : 0;
}
sa->sadb_sa_flags = 0;
if (x->props.flags & XFRM_STATE_NOECN)
sa->sadb_sa_flags |= SADB_SAFLAGS_NOECN;
if (x->props.flags & XFRM_STATE_DECAP_DSCP)
sa->sadb_sa_flags |= SADB_SAFLAGS_DECAP_DSCP;
if (x->props.flags & XFRM_STATE_NOPMTUDISC)
sa->sadb_sa_flags |= SADB_SAFLAGS_NOPMTUDISC;
/* hard time */
if (hsc & 2) {
lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD;
lifetime->sadb_lifetime_allocations = _X2KEY(x->lft.hard_packet_limit);
lifetime->sadb_lifetime_bytes = _X2KEY(x->lft.hard_byte_limit);
lifetime->sadb_lifetime_addtime = x->lft.hard_add_expires_seconds;
lifetime->sadb_lifetime_usetime = x->lft.hard_use_expires_seconds;
}
/* soft time */
if (hsc & 1) {
lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT;
lifetime->sadb_lifetime_allocations = _X2KEY(x->lft.soft_packet_limit);
lifetime->sadb_lifetime_bytes = _X2KEY(x->lft.soft_byte_limit);
lifetime->sadb_lifetime_addtime = x->lft.soft_add_expires_seconds;
lifetime->sadb_lifetime_usetime = x->lft.soft_use_expires_seconds;
}
/* current time */
lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
lifetime->sadb_lifetime_allocations = x->curlft.packets;
lifetime->sadb_lifetime_bytes = x->curlft.bytes;
lifetime->sadb_lifetime_addtime = x->curlft.add_time;
lifetime->sadb_lifetime_usetime = x->curlft.use_time;
/* src address */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
/* "if the ports are non-zero, then the sadb_address_proto field,
normally zero, MUST be filled in with the transport
protocol's number." - RFC2367 */
addr->sadb_address_proto = 0;
addr->sadb_address_reserved = 0;
addr->sadb_address_prefixlen =
pfkey_sockaddr_fill(&x->props.saddr, 0,
(struct sockaddr *) (addr + 1),
x->props.family);
BUG_ON(!addr->sadb_address_prefixlen);
/* dst address */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
addr->sadb_address_proto = 0;
addr->sadb_address_reserved = 0;
addr->sadb_address_prefixlen =
pfkey_sockaddr_fill(&x->id.daddr, 0,
(struct sockaddr *) (addr + 1),
x->props.family);
BUG_ON(!addr->sadb_address_prefixlen);
if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr,
x->props.family)) {
addr = skb_put(skb,
sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
addr->sadb_address_proto =
pfkey_proto_from_xfrm(x->sel.proto);
addr->sadb_address_prefixlen = x->sel.prefixlen_s;
addr->sadb_address_reserved = 0;
pfkey_sockaddr_fill(&x->sel.saddr, x->sel.sport,
(struct sockaddr *) (addr + 1),
x->props.family);
}
/* auth key */
if (add_keys && auth_key_size) {
key = skb_put(skb, sizeof(struct sadb_key) + auth_key_size);
key->sadb_key_len = (sizeof(struct sadb_key) + auth_key_size) /
sizeof(uint64_t);
key->sadb_key_exttype = SADB_EXT_KEY_AUTH;
key->sadb_key_bits = x->aalg->alg_key_len;
key->sadb_key_reserved = 0;
memcpy(key + 1, x->aalg->alg_key, (x->aalg->alg_key_len+7)/8);
}
/* encrypt key */
if (add_keys && encrypt_key_size) {
key = skb_put(skb, sizeof(struct sadb_key) + encrypt_key_size);
key->sadb_key_len = (sizeof(struct sadb_key) +
encrypt_key_size) / sizeof(uint64_t);
key->sadb_key_exttype = SADB_EXT_KEY_ENCRYPT;
key->sadb_key_bits = x->ealg->alg_key_len;
key->sadb_key_reserved = 0;
memcpy(key + 1, x->ealg->alg_key,
(x->ealg->alg_key_len+7)/8);
}
/* sa */
sa2 = skb_put(skb, sizeof(struct sadb_x_sa2));
sa2->sadb_x_sa2_len = sizeof(struct sadb_x_sa2)/sizeof(uint64_t);
sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2;
if ((mode = pfkey_mode_from_xfrm(x->props.mode)) < 0) {
kfree_skb(skb);
return ERR_PTR(-EINVAL);
}
sa2->sadb_x_sa2_mode = mode;
sa2->sadb_x_sa2_reserved1 = 0;
sa2->sadb_x_sa2_reserved2 = 0;
sa2->sadb_x_sa2_sequence = 0;
sa2->sadb_x_sa2_reqid = x->props.reqid;
if (natt && natt->encap_type) {
struct sadb_x_nat_t_type *n_type;
struct sadb_x_nat_t_port *n_port;
/* type */
n_type = skb_put(skb, sizeof(*n_type));
n_type->sadb_x_nat_t_type_len = sizeof(*n_type)/sizeof(uint64_t);
n_type->sadb_x_nat_t_type_exttype = SADB_X_EXT_NAT_T_TYPE;
n_type->sadb_x_nat_t_type_type = natt->encap_type;
n_type->sadb_x_nat_t_type_reserved[0] = 0;
n_type->sadb_x_nat_t_type_reserved[1] = 0;
n_type->sadb_x_nat_t_type_reserved[2] = 0;
/* source port */
n_port = skb_put(skb, sizeof(*n_port));
n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_SPORT;
n_port->sadb_x_nat_t_port_port = natt->encap_sport;
n_port->sadb_x_nat_t_port_reserved = 0;
/* dest port */
n_port = skb_put(skb, sizeof(*n_port));
n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_DPORT;
n_port->sadb_x_nat_t_port_port = natt->encap_dport;
n_port->sadb_x_nat_t_port_reserved = 0;
}
/* security context */
if (xfrm_ctx) {
sec_ctx = skb_put(skb,
sizeof(struct sadb_x_sec_ctx) + ctx_size);
sec_ctx->sadb_x_sec_len =
(sizeof(struct sadb_x_sec_ctx) + ctx_size) / sizeof(uint64_t);
sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX;
sec_ctx->sadb_x_ctx_doi = xfrm_ctx->ctx_doi;
sec_ctx->sadb_x_ctx_alg = xfrm_ctx->ctx_alg;
sec_ctx->sadb_x_ctx_len = xfrm_ctx->ctx_len;
memcpy(sec_ctx + 1, xfrm_ctx->ctx_str,
xfrm_ctx->ctx_len);
}
return skb;
}
static inline struct sk_buff *pfkey_xfrm_state2msg(const struct xfrm_state *x)
{
struct sk_buff *skb;
skb = __pfkey_xfrm_state2msg(x, 1, 3);
return skb;
}
static inline struct sk_buff *pfkey_xfrm_state2msg_expire(const struct xfrm_state *x,
int hsc)
{
return __pfkey_xfrm_state2msg(x, 0, hsc);
}
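/* The reverse of __pfkey_xfrm_state2msg(): allocate an xfrm_state and
 * fill it in from the SADB message extensions (SA, lifetimes, keys,
 * addresses, SA2, NAT-T, security context). Returns an ERR_PTR on
 * validation failure.
 */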
static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
const struct sadb_msg *hdr,
void * const *ext_hdrs)
{
struct xfrm_state *x;
const struct sadb_lifetime *lifetime;
const struct sadb_sa *sa;
const struct sadb_key *key;
const struct sadb_x_sec_ctx *sec_ctx;
uint16_t proto;
int err;
sa = ext_hdrs[SADB_EXT_SA - 1];
if (!sa ||
!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
return ERR_PTR(-EINVAL);
if (hdr->sadb_msg_satype == SADB_SATYPE_ESP &&
!ext_hdrs[SADB_EXT_KEY_ENCRYPT-1])
return ERR_PTR(-EINVAL);
if (hdr->sadb_msg_satype == SADB_SATYPE_AH &&
!ext_hdrs[SADB_EXT_KEY_AUTH-1])
return ERR_PTR(-EINVAL);
if (!!ext_hdrs[SADB_EXT_LIFETIME_HARD-1] !=
!!ext_hdrs[SADB_EXT_LIFETIME_SOFT-1])
return ERR_PTR(-EINVAL);
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
if (proto == 0)
return ERR_PTR(-EINVAL);
/* default error is no buffer space */
err = -ENOBUFS;
/* RFC2367:
Only SADB_SASTATE_MATURE SAs may be submitted in an SADB_ADD message.
SADB_SASTATE_LARVAL SAs are created by SADB_GETSPI and it is not
sensible to add a new SA in the DYING or SADB_SASTATE_DEAD state.
Therefore, the sadb_sa_state field of all submitted SAs MUST be
SADB_SASTATE_MATURE and the kernel MUST return an error if this is
not true.
However, KAME setkey always uses SADB_SASTATE_LARVAL.
Hence, we have to _ignore_ sadb_sa_state, which is also reasonable.
*/
if (sa->sadb_sa_auth > SADB_AALG_MAX ||
(hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP &&
sa->sadb_sa_encrypt > SADB_X_CALG_MAX) ||
sa->sadb_sa_encrypt > SADB_EALG_MAX)
return ERR_PTR(-EINVAL);
key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
if (key != NULL &&
sa->sadb_sa_auth != SADB_X_AALG_NULL &&
key->sadb_key_bits == 0)
return ERR_PTR(-EINVAL);
key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
if (key != NULL &&
sa->sadb_sa_encrypt != SADB_EALG_NULL &&
key->sadb_key_bits == 0)
return ERR_PTR(-EINVAL);
x = xfrm_state_alloc(net);
if (x == NULL)
return ERR_PTR(-ENOBUFS);
x->id.proto = proto;
x->id.spi = sa->sadb_sa_spi;
x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
(sizeof(x->replay.bitmap) * 8));
if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
x->props.flags |= XFRM_STATE_NOECN;
if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
x->props.flags |= XFRM_STATE_DECAP_DSCP;
if (sa->sadb_sa_flags & SADB_SAFLAGS_NOPMTUDISC)
x->props.flags |= XFRM_STATE_NOPMTUDISC;
lifetime = ext_hdrs[SADB_EXT_LIFETIME_HARD - 1];
if (lifetime != NULL) {
x->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
x->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
x->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
x->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
}
lifetime = ext_hdrs[SADB_EXT_LIFETIME_SOFT - 1];
if (lifetime != NULL) {
x->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
x->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
x->lft.soft_add_expires_seconds = lifetime->sadb_lifetime_addtime;
x->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
}
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx)
goto out;
err = security_xfrm_state_alloc(x, uctx);
kfree(uctx);
if (err)
goto out;
}
err = -ENOBUFS;
key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
if (sa->sadb_sa_auth) {
int keysize = 0;
struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
if (!a || !a->pfkey_supported) {
err = -ENOSYS;
goto out;
}
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
if (!x->aalg) {
err = -ENOMEM;
goto out;
}
strcpy(x->aalg->alg_name, a->name);
x->aalg->alg_key_len = 0;
if (key) {
x->aalg->alg_key_len = key->sadb_key_bits;
memcpy(x->aalg->alg_key, key+1, keysize);
}
x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits;
x->props.aalgo = sa->sadb_sa_auth;
/* x->algo.flags = sa->sadb_sa_flags; */
}
if (sa->sadb_sa_encrypt) {
if (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP) {
struct xfrm_algo_desc *a = xfrm_calg_get_byid(sa->sadb_sa_encrypt);
if (!a || !a->pfkey_supported) {
err = -ENOSYS;
goto out;
}
x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
if (!x->calg) {
err = -ENOMEM;
goto out;
}
strcpy(x->calg->alg_name, a->name);
x->props.calgo = sa->sadb_sa_encrypt;
} else {
int keysize = 0;
struct xfrm_algo_desc *a = xfrm_ealg_get_byid(sa->sadb_sa_encrypt);
if (!a || !a->pfkey_supported) {
err = -ENOSYS;
goto out;
}
key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
if (!x->ealg) {
err = -ENOMEM;
goto out;
}
strcpy(x->ealg->alg_name, a->name);
x->ealg->alg_key_len = 0;
if (key) {
x->ealg->alg_key_len = key->sadb_key_bits;
memcpy(x->ealg->alg_key, key+1, keysize);
}
x->props.ealgo = sa->sadb_sa_encrypt;
x->geniv = a->uinfo.encr.geniv;
}
}
/* x->algo.flags = sa->sadb_sa_flags; */
x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
&x->props.saddr);
pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1],
&x->id.daddr);
if (ext_hdrs[SADB_X_EXT_SA2-1]) {
const struct sadb_x_sa2 *sa2 = ext_hdrs[SADB_X_EXT_SA2-1];
int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
if (mode < 0) {
err = -EINVAL;
goto out;
}
x->props.mode = mode;
x->props.reqid = sa2->sadb_x_sa2_reqid;
}
if (ext_hdrs[SADB_EXT_ADDRESS_PROXY-1]) {
const struct sadb_address *addr = ext_hdrs[SADB_EXT_ADDRESS_PROXY-1];
/* Nobody uses this, but we try. */
x->sel.family = pfkey_sadb_addr2xfrm_addr(addr, &x->sel.saddr);
x->sel.prefixlen_s = addr->sadb_address_prefixlen;
}
if (!x->sel.family)
x->sel.family = x->props.family;
if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) {
const struct sadb_x_nat_t_type* n_type;
struct xfrm_encap_tmpl *natt;
x->encap = kzalloc(sizeof(*x->encap), GFP_KERNEL);
if (!x->encap) {
err = -ENOMEM;
goto out;
}
natt = x->encap;
n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
natt->encap_type = n_type->sadb_x_nat_t_type_type;
if (ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1]) {
const struct sadb_x_nat_t_port *n_port =
ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1];
natt->encap_sport = n_port->sadb_x_nat_t_port_port;
}
if (ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1]) {
const struct sadb_x_nat_t_port *n_port =
ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1];
natt->encap_dport = n_port->sadb_x_nat_t_port_port;
}
}
err = xfrm_init_state(x);
if (err)
goto out;
x->km.seq = hdr->sadb_msg_seq;
return x;
out:
x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
return ERR_PTR(err);
}
static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
return -EOPNOTSUPP;
}
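/* SADB_GETSPI: find (by sequence number) or create an acquire state for
 * the given addresses and protocol, allocate an SPI within the requested
 * range (default 0x100..0x0fffffff) and send the resulting SADB_GETSPI
 * reply back to the requesting socket.
 */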
static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct sk_buff *resp_skb;
struct sadb_x_sa2 *sa2;
struct sadb_address *saddr, *daddr;
struct sadb_msg *out_hdr;
struct sadb_spirange *range;
struct xfrm_state *x = NULL;
int mode;
int err;
u32 min_spi, max_spi;
u32 reqid;
u8 proto;
unsigned short family;
xfrm_address_t *xsaddr = NULL, *xdaddr = NULL;
if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
return -EINVAL;
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
if (proto == 0)
return -EINVAL;
if ((sa2 = ext_hdrs[SADB_X_EXT_SA2-1]) != NULL) {
mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
if (mode < 0)
return -EINVAL;
reqid = sa2->sadb_x_sa2_reqid;
} else {
mode = 0;
reqid = 0;
}
saddr = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
daddr = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
family = ((struct sockaddr *)(saddr + 1))->sa_family;
switch (family) {
case AF_INET:
xdaddr = (xfrm_address_t *)&((struct sockaddr_in *)(daddr + 1))->sin_addr.s_addr;
xsaddr = (xfrm_address_t *)&((struct sockaddr_in *)(saddr + 1))->sin_addr.s_addr;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
xdaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(daddr + 1))->sin6_addr;
xsaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(saddr + 1))->sin6_addr;
break;
#endif
}
if (hdr->sadb_msg_seq) {
x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
if (x && !xfrm_addr_equal(&x->id.daddr, xdaddr, family)) {
xfrm_state_put(x);
x = NULL;
}
}
if (!x)
x = xfrm_find_acq(net, &dummy_mark, mode, reqid, 0, proto, xdaddr, xsaddr, 1, family);
if (x == NULL)
return -ENOENT;
min_spi = 0x100;
max_spi = 0x0fffffff;
range = ext_hdrs[SADB_EXT_SPIRANGE-1];
if (range) {
min_spi = range->sadb_spirange_min;
max_spi = range->sadb_spirange_max;
}
err = verify_spi_info(x->id.proto, min_spi, max_spi, NULL);
if (err) {
xfrm_state_put(x);
return err;
}
err = xfrm_alloc_spi(x, min_spi, max_spi, NULL);
resp_skb = err ? ERR_PTR(err) : pfkey_xfrm_state2msg(x);
if (IS_ERR(resp_skb)) {
xfrm_state_put(x);
return PTR_ERR(resp_skb);
}
out_hdr = (struct sadb_msg *) resp_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
out_hdr->sadb_msg_type = SADB_GETSPI;
out_hdr->sadb_msg_satype = pfkey_proto2satype(proto);
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
xfrm_state_put(x);
pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
return 0;
}
static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct xfrm_state *x;
if (hdr->sadb_msg_len != sizeof(struct sadb_msg)/8)
return -EOPNOTSUPP;
if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
return 0;
x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
if (x == NULL)
return 0;
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_ACQ)
x->km.state = XFRM_STATE_ERROR;
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return 0;
}
static inline int event2poltype(int event)
{
switch (event) {
case XFRM_MSG_DELPOLICY:
return SADB_X_SPDDELETE;
case XFRM_MSG_NEWPOLICY:
return SADB_X_SPDADD;
case XFRM_MSG_UPDPOLICY:
return SADB_X_SPDUPDATE;
case XFRM_MSG_POLEXPIRE:
// return SADB_X_SPDEXPIRE;
default:
pr_err("pfkey: Unknown policy event %d\n", event);
break;
}
return 0;
}
static inline int event2keytype(int event)
{
switch (event) {
case XFRM_MSG_DELSA:
return SADB_DELETE;
case XFRM_MSG_NEWSA:
return SADB_ADD;
case XFRM_MSG_UPDSA:
return SADB_UPDATE;
case XFRM_MSG_EXPIRE:
return SADB_EXPIRE;
default:
pr_err("pfkey: Unknown SA event %d\n", event);
break;
}
return 0;
}
/* ADD/UPD/DEL */
static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
skb = pfkey_xfrm_state2msg(x);
if (IS_ERR(skb))
return PTR_ERR(skb);
hdr = (struct sadb_msg *) skb->data;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_type = event2keytype(c->event);
hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
hdr->sadb_msg_errno = 0;
hdr->sadb_msg_reserved = 0;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
return 0;
}
static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct xfrm_state *x;
int err;
struct km_event c;
x = pfkey_msg2xfrm_state(net, hdr, ext_hdrs);
if (IS_ERR(x))
return PTR_ERR(x);
xfrm_state_hold(x);
if (hdr->sadb_msg_type == SADB_ADD)
err = xfrm_state_add(x);
else
err = xfrm_state_update(x);
xfrm_audit_state_add(x, err ? 0 : 1, true);
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
__xfrm_state_put(x);
goto out;
}
if (hdr->sadb_msg_type == SADB_ADD)
c.event = XFRM_MSG_NEWSA;
else
c.event = XFRM_MSG_UPDSA;
c.seq = hdr->sadb_msg_seq;
c.portid = hdr->sadb_msg_pid;
km_state_notify(x, &c);
out:
xfrm_state_put(x);
return err;
}
static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct xfrm_state *x;
struct km_event c;
int err;
if (!ext_hdrs[SADB_EXT_SA-1] ||
!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
return -EINVAL;
x = pfkey_xfrm_state_lookup(net, hdr, ext_hdrs);
if (x == NULL)
return -ESRCH;
if ((err = security_xfrm_state_delete(x)))
goto out;
if (xfrm_state_kern(x)) {
err = -EPERM;
goto out;
}
err = xfrm_state_delete(x);
if (err < 0)
goto out;
c.seq = hdr->sadb_msg_seq;
c.portid = hdr->sadb_msg_pid;
c.event = XFRM_MSG_DELSA;
km_state_notify(x, &c);
out:
xfrm_audit_state_delete(x, err ? 0 : 1, true);
xfrm_state_put(x);
return err;
}
static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
__u8 proto;
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
struct xfrm_state *x;
if (!ext_hdrs[SADB_EXT_SA-1] ||
!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
return -EINVAL;
x = pfkey_xfrm_state_lookup(net, hdr, ext_hdrs);
if (x == NULL)
return -ESRCH;
out_skb = pfkey_xfrm_state2msg(x);
proto = x->id.proto;
xfrm_state_put(x);
if (IS_ERR(out_skb))
return PTR_ERR(out_skb);
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
out_hdr->sadb_msg_type = SADB_GET;
out_hdr->sadb_msg_satype = pfkey_proto2satype(proto);
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
gfp_t allocation)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
int len, auth_len, enc_len, i;
auth_len = xfrm_count_pfkey_auth_supported();
if (auth_len) {
auth_len *= sizeof(struct sadb_alg);
auth_len += sizeof(struct sadb_supported);
}
enc_len = xfrm_count_pfkey_enc_supported();
if (enc_len) {
enc_len *= sizeof(struct sadb_alg);
enc_len += sizeof(struct sadb_supported);
}
len = enc_len + auth_len + sizeof(struct sadb_msg);
skb = alloc_skb(len + 16, allocation);
if (!skb)
goto out_put_algs;
hdr = skb_put(skb, sizeof(*hdr));
pfkey_hdr_dup(hdr, orig);
hdr->sadb_msg_errno = 0;
hdr->sadb_msg_len = len / sizeof(uint64_t);
if (auth_len) {
struct sadb_supported *sp;
struct sadb_alg *ap;
sp = skb_put(skb, auth_len);
ap = (struct sadb_alg *) (sp + 1);
sp->sadb_supported_len = auth_len / sizeof(uint64_t);
sp->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH;
for (i = 0; ; i++) {
struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
if (!aalg)
break;
if (!aalg->pfkey_supported)
continue;
if (aalg->available)
*ap++ = aalg->desc;
}
}
if (enc_len) {
struct sadb_supported *sp;
struct sadb_alg *ap;
sp = skb_put(skb, enc_len);
ap = (struct sadb_alg *) (sp + 1);
sp->sadb_supported_len = enc_len / sizeof(uint64_t);
sp->sadb_supported_exttype = SADB_EXT_SUPPORTED_ENCRYPT;
for (i = 0; ; i++) {
struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
if (!ealg)
break;
if (!ealg->pfkey_supported)
continue;
if (ealg->available)
*ap++ = ealg->desc;
}
}
out_put_algs:
return skb;
}
static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct pfkey_sock *pfk = pfkey_sk(sk);
struct sk_buff *supp_skb;
if (hdr->sadb_msg_satype > SADB_SATYPE_MAX)
return -EINVAL;
if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC) {
if (pfk->registered&(1<<hdr->sadb_msg_satype))
return -EEXIST;
pfk->registered |= (1<<hdr->sadb_msg_satype);
}
mutex_lock(&pfkey_mutex);
xfrm_probe_algs();
supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
mutex_unlock(&pfkey_mutex);
if (!supp_skb) {
if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
pfk->registered &= ~(1<<hdr->sadb_msg_satype);
return -ENOBUFS;
}
pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
sock_net(sk));
return 0;
}
static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb)
return -ENOBUFS;
hdr = skb_put_data(skb, ihdr, sizeof(struct sadb_msg));
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
sock_net(sk));
}
static int key_notify_sa_flush(const struct km_event *c)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb)
return -ENOBUFS;
hdr = skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
hdr->sadb_msg_type = SADB_FLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
unsigned int proto;
struct km_event c;
int err, err2;
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
if (proto == 0)
return -EINVAL;
err = xfrm_state_flush(net, proto, true, false);
err2 = unicast_flush_resp(sk, hdr);
if (err || err2) {
if (err == -ESRCH) /* empty table - go quietly */
err = 0;
return err ? err : err2;
}
c.data.proto = proto;
c.seq = hdr->sadb_msg_seq;
c.portid = hdr->sadb_msg_pid;
c.event = XFRM_MSG_FLUSHSA;
c.net = net;
km_state_notify(NULL, &c);
return 0;
}
static int dump_sa(struct xfrm_state *x, int count, void *ptr)
{
struct pfkey_sock *pfk = ptr;
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
if (!pfkey_can_dump(&pfk->sk))
return -ENOBUFS;
out_skb = pfkey_xfrm_state2msg(x);
if (IS_ERR(out_skb))
return PTR_ERR(out_skb);
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = pfk->dump.msg_version;
out_hdr->sadb_msg_type = SADB_DUMP;
out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = count + 1;
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
return 0;
}
static int pfkey_dump_sa(struct pfkey_sock *pfk)
{
struct net *net = sock_net(&pfk->sk);
return xfrm_state_walk(net, &pfk->dump.u.state, dump_sa, (void *) pfk);
}
static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
{
struct net *net = sock_net(&pfk->sk);
xfrm_state_walk_done(&pfk->dump.u.state, net);
}
static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
u8 proto;
struct xfrm_address_filter *filter = NULL;
struct pfkey_sock *pfk = pfkey_sk(sk);
mutex_lock(&pfk->dump_lock);
if (pfk->dump.dump != NULL) {
mutex_unlock(&pfk->dump_lock);
return -EBUSY;
}
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
if (proto == 0) {
mutex_unlock(&pfk->dump_lock);
return -EINVAL;
}
if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
if ((xfilter->sadb_x_filter_splen >
(sizeof(xfrm_address_t) << 3)) ||
(xfilter->sadb_x_filter_dplen >
(sizeof(xfrm_address_t) << 3))) {
mutex_unlock(&pfk->dump_lock);
return -EINVAL;
}
filter = kmalloc(sizeof(*filter), GFP_KERNEL);
if (filter == NULL) {
mutex_unlock(&pfk->dump_lock);
return -ENOMEM;
}
memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
sizeof(xfrm_address_t));
memcpy(&filter->daddr, &xfilter->sadb_x_filter_daddr,
sizeof(xfrm_address_t));
filter->family = xfilter->sadb_x_filter_family;
filter->splen = xfilter->sadb_x_filter_splen;
filter->dplen = xfilter->sadb_x_filter_dplen;
}
pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_portid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sa;
pfk->dump.done = pfkey_dump_sa_done;
xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct pfkey_sock *pfk = pfkey_sk(sk);
int satype = hdr->sadb_msg_satype;
bool reset_errno = false;
if (hdr->sadb_msg_len == (sizeof(*hdr) / sizeof(uint64_t))) {
reset_errno = true;
if (satype != 0 && satype != 1)
return -EINVAL;
pfk->promisc = satype;
}
if (reset_errno && skb_cloned(skb))
skb = skb_copy(skb, GFP_KERNEL);
else
skb = skb_clone(skb, GFP_KERNEL);
if (reset_errno && skb) {
struct sadb_msg *new_hdr = (struct sadb_msg *) skb->data;
new_hdr->sadb_msg_errno = 0;
}
pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}
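/* xfrm_policy_walk() callback used by gen_reqid(): reject the candidate
 * reqid if any template of an existing policy already uses it.
 */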
static int check_reqid(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
int i;
u32 reqid = *(u32*)ptr;
for (i=0; i<xp->xfrm_nr; i++) {
if (xp->xfrm_vec[i].reqid == reqid)
return -EEXIST;
}
return 0;
}
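/* Allocate an unused reqid above IPSEC_MANUAL_REQID_MAX by scanning the
 * main policy table for collisions; returns 0 if the whole space is taken.
 */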
static u32 gen_reqid(struct net *net)
{
struct xfrm_policy_walk walk;
u32 start;
int rc;
static u32 reqid = IPSEC_MANUAL_REQID_MAX;
start = reqid;
do {
++reqid;
if (reqid == 0)
reqid = IPSEC_MANUAL_REQID_MAX+1;
xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid);
xfrm_policy_walk_done(&walk, net);
if (rc != -EEXIST)
return reqid;
} while (reqid != start);
return 0;
}
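/* Convert one sadb_x_ipsecrequest into the next xfrm_tmpl of @xp.
 * Tunnel-mode requests carry a source/destination address pair directly
 * after the structure, parsed via parse_sockaddr_pair().
 */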
static int
parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_policy *pol,
struct sadb_x_ipsecrequest *rq)
{
struct net *net = xp_net(xp);
struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
int mode;
if (xp->xfrm_nr >= XFRM_MAX_DEPTH)
return -ELOOP;
if (rq->sadb_x_ipsecrequest_mode == 0)
return -EINVAL;
if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
return -EINVAL;
t->id.proto = rq->sadb_x_ipsecrequest_proto;
if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
return -EINVAL;
t->mode = mode;
if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) {
if ((mode == XFRM_MODE_TUNNEL || mode == XFRM_MODE_BEET) &&
pol->sadb_x_policy_dir == IPSEC_DIR_OUTBOUND)
return -EINVAL;
t->optional = 1;
} else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
t->reqid = rq->sadb_x_ipsecrequest_reqid;
if (t->reqid > IPSEC_MANUAL_REQID_MAX)
t->reqid = 0;
if (!t->reqid && !(t->reqid = gen_reqid(net)))
return -ENOBUFS;
}
/* addresses present only in tunnel mode */
if (t->mode == XFRM_MODE_TUNNEL) {
int err;
err = parse_sockaddr_pair(
(struct sockaddr *)(rq + 1),
rq->sadb_x_ipsecrequest_len - sizeof(*rq),
&t->saddr, &t->id.daddr, &t->encap_family);
if (err)
return err;
} else
t->encap_family = xp->family;
/* No way to set this via kame pfkey */
t->allalgs = 1;
xp->xfrm_nr++;
return 0;
}
static int
parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
{
int err;
int len = pol->sadb_x_policy_len*8 - sizeof(struct sadb_x_policy);
struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
return -EINVAL;
while (len >= sizeof(*rq)) {
if (len < rq->sadb_x_ipsecrequest_len ||
rq->sadb_x_ipsecrequest_len < sizeof(*rq))
return -EINVAL;
if ((err = parse_ipsecrequest(xp, pol, rq)) < 0)
return err;
len -= rq->sadb_x_ipsecrequest_len;
rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
}
return 0;
}
static inline int pfkey_xfrm_policy2sec_ctx_size(const struct xfrm_policy *xp)
{
struct xfrm_sec_ctx *xfrm_ctx = xp->security;
if (xfrm_ctx) {
int len = sizeof(struct sadb_x_sec_ctx);
len += xfrm_ctx->ctx_len;
return PFKEY_ALIGN8(len);
}
return 0;
}
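/* Worst-case size of the PF_KEY message for a policy, counting address
 * space for every template; pfkey_xfrm_policy2msg() trims that space again
 * for transport-mode templates, which carry no addresses.
 */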
static int pfkey_xfrm_policy2msg_size(const struct xfrm_policy *xp)
{
const struct xfrm_tmpl *t;
int sockaddr_size = pfkey_sockaddr_size(xp->family);
int socklen = 0;
int i;
for (i=0; i<xp->xfrm_nr; i++) {
t = xp->xfrm_vec + i;
socklen += pfkey_sockaddr_len(t->encap_family);
}
return sizeof(struct sadb_msg) +
(sizeof(struct sadb_lifetime) * 3) +
(sizeof(struct sadb_address) * 2) +
(sockaddr_size * 2) +
sizeof(struct sadb_x_policy) +
(xp->xfrm_nr * sizeof(struct sadb_x_ipsecrequest)) +
(socklen * 2) +
pfkey_xfrm_policy2sec_ctx_size(xp);
}
static struct sk_buff * pfkey_xfrm_policy2msg_prep(const struct xfrm_policy *xp)
{
struct sk_buff *skb;
int size;
size = pfkey_xfrm_policy2msg_size(xp);
skb = alloc_skb(size + 16, GFP_ATOMIC);
if (skb == NULL)
return ERR_PTR(-ENOBUFS);
return skb;
}
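/* Fill @skb with the address, lifetime, policy and per-template
 * ipsecrequest extensions for @xp. Only sadb_msg_len and sadb_msg_reserved
 * are set in the base header; the caller fills in the remaining fields.
 */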
static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *xp, int dir)
{
struct sadb_msg *hdr;
struct sadb_address *addr;
struct sadb_lifetime *lifetime;
struct sadb_x_policy *pol;
struct sadb_x_sec_ctx *sec_ctx;
struct xfrm_sec_ctx *xfrm_ctx;
int i;
int size;
int sockaddr_size = pfkey_sockaddr_size(xp->family);
int socklen = pfkey_sockaddr_len(xp->family);
size = pfkey_xfrm_policy2msg_size(xp);
	/* caller should fill header later */
hdr = skb_put(skb, sizeof(struct sadb_msg));
memset(hdr, 0, size); /* XXX do we need this ? */
/* src address */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
addr->sadb_address_prefixlen = xp->selector.prefixlen_s;
addr->sadb_address_reserved = 0;
if (!pfkey_sockaddr_fill(&xp->selector.saddr,
xp->selector.sport,
(struct sockaddr *) (addr + 1),
xp->family))
BUG();
/* dst address */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
addr->sadb_address_prefixlen = xp->selector.prefixlen_d;
addr->sadb_address_reserved = 0;
pfkey_sockaddr_fill(&xp->selector.daddr, xp->selector.dport,
(struct sockaddr *) (addr + 1),
xp->family);
/* hard time */
lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD;
lifetime->sadb_lifetime_allocations = _X2KEY(xp->lft.hard_packet_limit);
lifetime->sadb_lifetime_bytes = _X2KEY(xp->lft.hard_byte_limit);
lifetime->sadb_lifetime_addtime = xp->lft.hard_add_expires_seconds;
lifetime->sadb_lifetime_usetime = xp->lft.hard_use_expires_seconds;
/* soft time */
lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT;
lifetime->sadb_lifetime_allocations = _X2KEY(xp->lft.soft_packet_limit);
lifetime->sadb_lifetime_bytes = _X2KEY(xp->lft.soft_byte_limit);
lifetime->sadb_lifetime_addtime = xp->lft.soft_add_expires_seconds;
lifetime->sadb_lifetime_usetime = xp->lft.soft_use_expires_seconds;
/* current time */
lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
lifetime->sadb_lifetime_allocations = xp->curlft.packets;
lifetime->sadb_lifetime_bytes = xp->curlft.bytes;
lifetime->sadb_lifetime_addtime = xp->curlft.add_time;
lifetime->sadb_lifetime_usetime = xp->curlft.use_time;
pol = skb_put(skb, sizeof(struct sadb_x_policy));
pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_DISCARD;
if (xp->action == XFRM_POLICY_ALLOW) {
if (xp->xfrm_nr)
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
else
pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
}
pol->sadb_x_policy_dir = dir+1;
pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = xp->index;
pol->sadb_x_policy_priority = xp->priority;
for (i=0; i<xp->xfrm_nr; i++) {
const struct xfrm_tmpl *t = xp->xfrm_vec + i;
struct sadb_x_ipsecrequest *rq;
int req_size;
int mode;
req_size = sizeof(struct sadb_x_ipsecrequest);
if (t->mode == XFRM_MODE_TUNNEL) {
socklen = pfkey_sockaddr_len(t->encap_family);
req_size += socklen * 2;
} else {
size -= 2*socklen;
}
rq = skb_put(skb, req_size);
pol->sadb_x_policy_len += req_size/8;
memset(rq, 0, sizeof(*rq));
rq->sadb_x_ipsecrequest_len = req_size;
rq->sadb_x_ipsecrequest_proto = t->id.proto;
if ((mode = pfkey_mode_from_xfrm(t->mode)) < 0)
return -EINVAL;
rq->sadb_x_ipsecrequest_mode = mode;
rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE;
if (t->reqid)
rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE;
if (t->optional)
rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE;
rq->sadb_x_ipsecrequest_reqid = t->reqid;
if (t->mode == XFRM_MODE_TUNNEL) {
u8 *sa = (void *)(rq + 1);
pfkey_sockaddr_fill(&t->saddr, 0,
(struct sockaddr *)sa,
t->encap_family);
pfkey_sockaddr_fill(&t->id.daddr, 0,
(struct sockaddr *) (sa + socklen),
t->encap_family);
}
}
/* security context */
if ((xfrm_ctx = xp->security)) {
int ctx_size = pfkey_xfrm_policy2sec_ctx_size(xp);
sec_ctx = skb_put(skb, ctx_size);
sec_ctx->sadb_x_sec_len = ctx_size / sizeof(uint64_t);
sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX;
sec_ctx->sadb_x_ctx_doi = xfrm_ctx->ctx_doi;
sec_ctx->sadb_x_ctx_alg = xfrm_ctx->ctx_alg;
sec_ctx->sadb_x_ctx_len = xfrm_ctx->ctx_len;
memcpy(sec_ctx + 1, xfrm_ctx->ctx_str,
xfrm_ctx->ctx_len);
}
hdr->sadb_msg_len = size / sizeof(uint64_t);
hdr->sadb_msg_reserved = refcount_read(&xp->refcnt);
return 0;
}
static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
int err;
out_skb = pfkey_xfrm_policy2msg_prep(xp);
if (IS_ERR(out_skb))
return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
if (err < 0) {
kfree_skb(out_skb);
return err;
}
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = PF_KEY_V2;
if (c->data.byid && c->event == XFRM_MSG_DELPOLICY)
out_hdr->sadb_msg_type = SADB_X_SPDDELETE2;
else
out_hdr->sadb_msg_type = event2poltype(c->event);
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->portid;
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
return 0;
}
static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
int err = 0;
struct sadb_lifetime *lifetime;
struct sadb_address *sa;
struct sadb_x_policy *pol;
struct xfrm_policy *xp;
struct km_event c;
struct sadb_x_sec_ctx *sec_ctx;
if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]) ||
!ext_hdrs[SADB_X_EXT_POLICY-1])
return -EINVAL;
pol = ext_hdrs[SADB_X_EXT_POLICY-1];
if (pol->sadb_x_policy_type > IPSEC_POLICY_IPSEC)
return -EINVAL;
if (!pol->sadb_x_policy_dir || pol->sadb_x_policy_dir >= IPSEC_DIR_MAX)
return -EINVAL;
xp = xfrm_policy_alloc(net, GFP_KERNEL);
if (xp == NULL)
return -ENOBUFS;
xp->action = (pol->sadb_x_policy_type == IPSEC_POLICY_DISCARD ?
XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);
xp->priority = pol->sadb_x_policy_priority;
sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
xp->selector.family = xp->family;
xp->selector.prefixlen_s = sa->sadb_address_prefixlen;
xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
xp->selector.sport = ((struct sockaddr_in *)(sa+1))->sin_port;
if (xp->selector.sport)
xp->selector.sport_mask = htons(0xffff);
sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr);
xp->selector.prefixlen_d = sa->sadb_address_prefixlen;
/* Amusing, we set this twice. KAME apps appear to set same value
* in both addresses.
*/
xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
xp->selector.dport = ((struct sockaddr_in *)(sa+1))->sin_port;
if (xp->selector.dport)
xp->selector.dport_mask = htons(0xffff);
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx) {
err = -ENOBUFS;
goto out;
}
err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL);
kfree(uctx);
if (err)
goto out;
}
xp->lft.soft_byte_limit = XFRM_INF;
xp->lft.hard_byte_limit = XFRM_INF;
xp->lft.soft_packet_limit = XFRM_INF;
xp->lft.hard_packet_limit = XFRM_INF;
if ((lifetime = ext_hdrs[SADB_EXT_LIFETIME_HARD-1]) != NULL) {
xp->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
xp->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
xp->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
xp->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
}
if ((lifetime = ext_hdrs[SADB_EXT_LIFETIME_SOFT-1]) != NULL) {
xp->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
xp->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
xp->lft.soft_add_expires_seconds = lifetime->sadb_lifetime_addtime;
xp->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
}
xp->xfrm_nr = 0;
if (pol->sadb_x_policy_type == IPSEC_POLICY_IPSEC &&
(err = parse_ipsecrequests(xp, pol)) < 0)
goto out;
err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp,
hdr->sadb_msg_type != SADB_X_SPDUPDATE);
xfrm_audit_policy_add(xp, err ? 0 : 1, true);
if (err)
goto out;
if (hdr->sadb_msg_type == SADB_X_SPDUPDATE)
c.event = XFRM_MSG_UPDPOLICY;
else
c.event = XFRM_MSG_NEWPOLICY;
c.seq = hdr->sadb_msg_seq;
c.portid = hdr->sadb_msg_pid;
km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
xfrm_pol_put(xp);
return 0;
out:
xp->walk.dead = 1;
xfrm_policy_destroy(xp);
return err;
}
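/* SADB_X_SPDDELETE: look up a main-type policy by selector (and optional
 * security context) in the requested direction, delete it and notify
 * listeners.
 */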
static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
int err;
struct sadb_address *sa;
struct sadb_x_policy *pol;
struct xfrm_policy *xp;
struct xfrm_selector sel;
struct km_event c;
struct sadb_x_sec_ctx *sec_ctx;
struct xfrm_sec_ctx *pol_ctx = NULL;
if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]) ||
!ext_hdrs[SADB_X_EXT_POLICY-1])
return -EINVAL;
pol = ext_hdrs[SADB_X_EXT_POLICY-1];
if (!pol->sadb_x_policy_dir || pol->sadb_x_policy_dir >= IPSEC_DIR_MAX)
return -EINVAL;
memset(&sel, 0, sizeof(sel));
sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
sel.prefixlen_s = sa->sadb_address_prefixlen;
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
sel.sport = ((struct sockaddr_in *)(sa+1))->sin_port;
if (sel.sport)
sel.sport_mask = htons(0xffff);
sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
sel.prefixlen_d = sa->sadb_address_prefixlen;
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
sel.dport = ((struct sockaddr_in *)(sa+1))->sin_port;
if (sel.dport)
sel.dport_mask = htons(0xffff);
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx)
return -ENOMEM;
err = security_xfrm_policy_alloc(&pol_ctx, uctx, GFP_KERNEL);
kfree(uctx);
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
1, &err);
security_xfrm_policy_free(pol_ctx);
if (xp == NULL)
return -ENOENT;
xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
if (err)
goto out;
c.seq = hdr->sadb_msg_seq;
c.portid = hdr->sadb_msg_pid;
c.data.byid = 0;
c.event = XFRM_MSG_DELPOLICY;
km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
out:
xfrm_pol_put(xp);
return err;
}
static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struct sadb_msg *hdr, int dir)
{
int err;
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
err = 0;
out_skb = pfkey_xfrm_policy2msg_prep(xp);
if (IS_ERR(out_skb)) {
err = PTR_ERR(out_skb);
goto out;
}
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
if (err < 0) {
kfree_skb(out_skb);
goto out;
}
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
out_hdr->sadb_msg_type = hdr->sadb_msg_type;
out_hdr->sadb_msg_satype = 0;
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
err = 0;
out:
return err;
}
static int pfkey_sockaddr_pair_size(sa_family_t family)
{
return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
}
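/* Extract a source/destination sockaddr pair (both must be of the same
 * family) as appended to tunnel-mode ipsecrequests and kmaddress
 * extensions.
 */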
static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
xfrm_address_t *saddr, xfrm_address_t *daddr,
u16 *family)
{
int af, socklen;
if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
return -EINVAL;
af = pfkey_sockaddr_extract(sa, saddr);
if (!af)
return -EINVAL;
socklen = pfkey_sockaddr_len(af);
if (pfkey_sockaddr_extract((struct sockaddr *) (((u8 *)sa) + socklen),
daddr) != af)
return -EINVAL;
*family = af;
return 0;
}
#ifdef CONFIG_NET_KEY_MIGRATE
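/* Parse two consecutive ipsecrequests (old endpoints followed by new
 * endpoints) into one xfrm_migrate entry; returns the number of bytes
 * consumed on success.
 */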
static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
struct xfrm_migrate *m)
{
int err;
struct sadb_x_ipsecrequest *rq2;
int mode;
if (len < sizeof(*rq1) ||
len < rq1->sadb_x_ipsecrequest_len ||
rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
return -EINVAL;
	/* old endpoints */
err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
&m->old_saddr, &m->old_daddr,
&m->old_family);
if (err)
return err;
rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
len -= rq1->sadb_x_ipsecrequest_len;
if (len <= sizeof(*rq2) ||
len < rq2->sadb_x_ipsecrequest_len ||
rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
return -EINVAL;
/* new endpoints */
err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
&m->new_saddr, &m->new_daddr,
&m->new_family);
if (err)
return err;
if (rq1->sadb_x_ipsecrequest_proto != rq2->sadb_x_ipsecrequest_proto ||
rq1->sadb_x_ipsecrequest_mode != rq2->sadb_x_ipsecrequest_mode ||
rq1->sadb_x_ipsecrequest_reqid != rq2->sadb_x_ipsecrequest_reqid)
return -EINVAL;
m->proto = rq1->sadb_x_ipsecrequest_proto;
if ((mode = pfkey_mode_to_xfrm(rq1->sadb_x_ipsecrequest_mode)) < 0)
return -EINVAL;
m->mode = mode;
m->reqid = rq1->sadb_x_ipsecrequest_reqid;
return ((int)(rq1->sadb_x_ipsecrequest_len +
rq2->sadb_x_ipsecrequest_len));
}
static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
const struct sadb_msg *hdr, void * const *ext_hdrs)
{
int i, len, ret, err = -EINVAL;
u8 dir;
struct sadb_address *sa;
struct sadb_x_kmaddress *kma;
struct sadb_x_policy *pol;
struct sadb_x_ipsecrequest *rq;
struct xfrm_selector sel;
struct xfrm_migrate m[XFRM_MAX_DEPTH];
struct xfrm_kmaddress k;
struct net *net = sock_net(sk);
if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
!ext_hdrs[SADB_X_EXT_POLICY - 1]) {
err = -EINVAL;
goto out;
}
kma = ext_hdrs[SADB_X_EXT_KMADDRESS - 1];
pol = ext_hdrs[SADB_X_EXT_POLICY - 1];
if (pol->sadb_x_policy_dir >= IPSEC_DIR_MAX) {
err = -EINVAL;
goto out;
}
if (kma) {
/* convert sadb_x_kmaddress to xfrm_kmaddress */
k.reserved = kma->sadb_x_kmaddress_reserved;
ret = parse_sockaddr_pair((struct sockaddr *)(kma + 1),
8*(kma->sadb_x_kmaddress_len) - sizeof(*kma),
&k.local, &k.remote, &k.family);
if (ret < 0) {
err = ret;
goto out;
}
}
dir = pol->sadb_x_policy_dir - 1;
memset(&sel, 0, sizeof(sel));
/* set source address info of selector */
sa = ext_hdrs[SADB_EXT_ADDRESS_SRC - 1];
sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
sel.prefixlen_s = sa->sadb_address_prefixlen;
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
sel.sport = ((struct sockaddr_in *)(sa + 1))->sin_port;
if (sel.sport)
sel.sport_mask = htons(0xffff);
/* set destination address info of selector */
sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
sel.prefixlen_d = sa->sadb_address_prefixlen;
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
sel.dport = ((struct sockaddr_in *)(sa + 1))->sin_port;
if (sel.dport)
sel.dport_mask = htons(0xffff);
rq = (struct sadb_x_ipsecrequest *)(pol + 1);
/* extract ipsecrequests */
i = 0;
len = pol->sadb_x_policy_len * 8 - sizeof(struct sadb_x_policy);
while (len > 0 && i < XFRM_MAX_DEPTH) {
ret = ipsecrequests_to_migrate(rq, len, &m[i]);
if (ret < 0) {
err = ret;
goto out;
} else {
rq = (struct sadb_x_ipsecrequest *)((u8 *)rq + ret);
len -= ret;
i++;
}
}
if (!i || len > 0) {
err = -EINVAL;
goto out;
}
return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i,
kma ? &k : NULL, net, NULL, 0, NULL);
out:
return err;
}
#else
static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
const struct sadb_msg *hdr, void * const *ext_hdrs)
{
return -ENOPROTOOPT;
}
#endif
static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
unsigned int dir;
int err = 0, delete;
struct sadb_x_policy *pol;
struct xfrm_policy *xp;
struct km_event c;
if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL)
return -EINVAL;
dir = xfrm_policy_id2dir(pol->sadb_x_policy_id);
if (dir >= XFRM_POLICY_MAX)
return -EINVAL;
delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
dir, pol->sadb_x_policy_id, delete, &err);
if (xp == NULL)
return -ENOENT;
if (delete) {
xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
if (err)
goto out;
c.seq = hdr->sadb_msg_seq;
c.portid = hdr->sadb_msg_pid;
c.data.byid = 1;
c.event = XFRM_MSG_DELPOLICY;
km_policy_notify(xp, dir, &c);
} else {
err = key_pol_get_resp(sk, xp, hdr, dir);
}
out:
xfrm_pol_put(xp);
return err;
}
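/* Per-policy callback for SADB_X_SPDDUMP, mirroring dump_sa(): the newest
 * message is parked in pfk->dump.skb and the previous one is sent to the
 * dumping socket.
 */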
static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
struct pfkey_sock *pfk = ptr;
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
int err;
if (!pfkey_can_dump(&pfk->sk))
return -ENOBUFS;
out_skb = pfkey_xfrm_policy2msg_prep(xp);
if (IS_ERR(out_skb))
return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
if (err < 0) {
kfree_skb(out_skb);
return err;
}
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = pfk->dump.msg_version;
out_hdr->sadb_msg_type = SADB_X_SPDDUMP;
out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = count + 1;
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
return 0;
}
static int pfkey_dump_sp(struct pfkey_sock *pfk)
{
struct net *net = sock_net(&pfk->sk);
return xfrm_policy_walk(net, &pfk->dump.u.policy, dump_sp, (void *) pfk);
}
static void pfkey_dump_sp_done(struct pfkey_sock *pfk)
{
struct net *net = sock_net((struct sock *)pfk);
xfrm_policy_walk_done(&pfk->dump.u.policy, net);
}
static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct pfkey_sock *pfk = pfkey_sk(sk);
mutex_lock(&pfk->dump_lock);
if (pfk->dump.dump != NULL) {
mutex_unlock(&pfk->dump_lock);
return -EBUSY;
}
pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_portid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sp;
pfk->dump.done = pfkey_dump_sp_done;
xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
static int key_notify_policy_flush(const struct km_event *c)
{
struct sk_buff *skb_out;
struct sadb_msg *hdr;
skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb_out)
return -ENOBUFS;
hdr = skb_put(skb_out, sizeof(struct sadb_msg));
hdr->sadb_msg_type = SADB_X_SPDFLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct km_event c;
int err, err2;
err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
err2 = unicast_flush_resp(sk, hdr);
if (err || err2) {
if (err == -ESRCH) /* empty table - old silent behavior */
return 0;
return err;
}
c.data.type = XFRM_POLICY_TYPE_MAIN;
c.event = XFRM_MSG_FLUSHPOLICY;
c.portid = hdr->sadb_msg_pid;
c.seq = hdr->sadb_msg_seq;
c.net = net;
km_policy_notify(NULL, 0, &c);
return 0;
}
typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
const struct sadb_msg *hdr, void * const *ext_hdrs);
static const pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
[SADB_RESERVED] = pfkey_reserved,
[SADB_GETSPI] = pfkey_getspi,
[SADB_UPDATE] = pfkey_add,
[SADB_ADD] = pfkey_add,
[SADB_DELETE] = pfkey_delete,
[SADB_GET] = pfkey_get,
[SADB_ACQUIRE] = pfkey_acquire,
[SADB_REGISTER] = pfkey_register,
[SADB_EXPIRE] = NULL,
[SADB_FLUSH] = pfkey_flush,
[SADB_DUMP] = pfkey_dump,
[SADB_X_PROMISC] = pfkey_promisc,
[SADB_X_PCHANGE] = NULL,
[SADB_X_SPDUPDATE] = pfkey_spdadd,
[SADB_X_SPDADD] = pfkey_spdadd,
[SADB_X_SPDDELETE] = pfkey_spddelete,
[SADB_X_SPDGET] = pfkey_spdget,
[SADB_X_SPDACQUIRE] = NULL,
[SADB_X_SPDDUMP] = pfkey_spddump,
[SADB_X_SPDFLUSH] = pfkey_spdflush,
[SADB_X_SPDSETIDX] = pfkey_spdadd,
[SADB_X_SPDDELETE2] = pfkey_spdget,
[SADB_X_MIGRATE] = pfkey_migrate,
};
static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr)
{
void *ext_hdrs[SADB_EXT_MAX];
int err;
/* Non-zero return value of pfkey_broadcast() does not always signal
* an error and even on an actual error we may still want to process
* the message so rather ignore the return value.
*/
pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
memset(ext_hdrs, 0, sizeof(ext_hdrs));
err = parse_exthdrs(skb, hdr, ext_hdrs);
if (!err) {
err = -EOPNOTSUPP;
if (pfkey_funcs[hdr->sadb_msg_type])
err = pfkey_funcs[hdr->sadb_msg_type](sk, skb, hdr, ext_hdrs);
}
return err;
}
static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
{
struct sadb_msg *hdr = NULL;
if (skb->len < sizeof(*hdr)) {
*errp = -EMSGSIZE;
} else {
hdr = (struct sadb_msg *) skb->data;
if (hdr->sadb_msg_version != PF_KEY_V2 ||
hdr->sadb_msg_reserved != 0 ||
(hdr->sadb_msg_type <= SADB_RESERVED ||
hdr->sadb_msg_type > SADB_MAX)) {
hdr = NULL;
*errp = -EINVAL;
} else if (hdr->sadb_msg_len != (skb->len /
sizeof(uint64_t)) ||
hdr->sadb_msg_len < (sizeof(struct sadb_msg) /
sizeof(uint64_t))) {
hdr = NULL;
*errp = -EMSGSIZE;
} else {
*errp = 0;
}
}
return hdr;
}
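/* Test whether the template's algorithm bitmask (aalgos here, ealgos in the
 * helper below) permits the given algorithm; ids beyond the width of the
 * mask are rejected.
 */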
static inline int aalg_tmpl_set(const struct xfrm_tmpl *t,
const struct xfrm_algo_desc *d)
{
unsigned int id = d->desc.sadb_alg_id;
if (id >= sizeof(t->aalgos) * 8)
return 0;
return (t->aalgos >> id) & 1;
}
static inline int ealg_tmpl_set(const struct xfrm_tmpl *t,
const struct xfrm_algo_desc *d)
{
unsigned int id = d->desc.sadb_alg_id;
if (id >= sizeof(t->ealgos) * 8)
return 0;
return (t->ealgos >> id) & 1;
}
static int count_ah_combs(const struct xfrm_tmpl *t)
{
int i, sz = 0;
for (i = 0; ; i++) {
const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
if (!aalg)
break;
if (!aalg->pfkey_supported)
continue;
if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
return sz + sizeof(struct sadb_prop);
}
static int count_esp_combs(const struct xfrm_tmpl *t)
{
int i, k, sz = 0;
for (i = 0; ; i++) {
const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
if (!ealg)
break;
if (!ealg->pfkey_supported)
continue;
if (!(ealg_tmpl_set(t, ealg)))
continue;
for (k = 1; ; k++) {
const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
if (!aalg)
break;
if (!aalg->pfkey_supported)
continue;
if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
}
return sz + sizeof(struct sadb_prop);
}
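/* Emit a SADB_EXT_PROPOSAL with one sadb_comb per available authentication
 * algorithm permitted by the template; returns the number of bytes added.
 */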
static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
int sz = 0;
int i;
p = skb_put(skb, sizeof(struct sadb_prop));
p->sadb_prop_len = sizeof(struct sadb_prop)/8;
p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
p->sadb_prop_replay = 32;
memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
for (i = 0; ; i++) {
const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
if (!aalg)
break;
if (!aalg->pfkey_supported)
continue;
if (aalg_tmpl_set(t, aalg) && aalg->available) {
struct sadb_comb *c;
c = skb_put_zero(skb, sizeof(struct sadb_comb));
p->sadb_prop_len += sizeof(struct sadb_comb)/8;
c->sadb_comb_auth = aalg->desc.sadb_alg_id;
c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits;
c->sadb_comb_auth_maxbits = aalg->desc.sadb_alg_maxbits;
c->sadb_comb_hard_addtime = 24*60*60;
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
sz += sizeof(*c);
}
}
return sz + sizeof(*p);
}
static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
int sz = 0;
int i, k;
p = skb_put(skb, sizeof(struct sadb_prop));
p->sadb_prop_len = sizeof(struct sadb_prop)/8;
p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
p->sadb_prop_replay = 32;
memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
for (i=0; ; i++) {
const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
if (!ealg)
break;
if (!ealg->pfkey_supported)
continue;
if (!(ealg_tmpl_set(t, ealg) && ealg->available))
continue;
for (k = 1; ; k++) {
struct sadb_comb *c;
const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
if (!aalg)
break;
if (!aalg->pfkey_supported)
continue;
if (!(aalg_tmpl_set(t, aalg) && aalg->available))
continue;
c = skb_put(skb, sizeof(struct sadb_comb));
memset(c, 0, sizeof(*c));
p->sadb_prop_len += sizeof(struct sadb_comb)/8;
c->sadb_comb_auth = aalg->desc.sadb_alg_id;
c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits;
c->sadb_comb_auth_maxbits = aalg->desc.sadb_alg_maxbits;
c->sadb_comb_encrypt = ealg->desc.sadb_alg_id;
c->sadb_comb_encrypt_minbits = ealg->desc.sadb_alg_minbits;
c->sadb_comb_encrypt_maxbits = ealg->desc.sadb_alg_maxbits;
c->sadb_comb_hard_addtime = 24*60*60;
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
sz += sizeof(*c);
}
}
return sz + sizeof(*p);
}
static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
{
return 0;
}
static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
{
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
int hard;
int hsc;
hard = c->data.hard;
if (hard)
hsc = 2;
else
hsc = 1;
out_skb = pfkey_xfrm_state2msg_expire(x, hsc);
if (IS_ERR(out_skb))
return PTR_ERR(out_skb);
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = PF_KEY_V2;
out_hdr->sadb_msg_type = SADB_EXPIRE;
out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0;
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
return 0;
}
static int pfkey_send_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = x ? xs_net(x) : c->net;
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
if (atomic_read(&net_pfkey->socks_nr) == 0)
return 0;
switch (c->event) {
case XFRM_MSG_EXPIRE:
return key_notify_sa_expire(x, c);
case XFRM_MSG_DELSA:
case XFRM_MSG_NEWSA:
case XFRM_MSG_UPDSA:
return key_notify_sa(x, c);
case XFRM_MSG_FLUSHSA:
return key_notify_sa_flush(c);
case XFRM_MSG_NEWAE: /* not yet supported */
break;
default:
pr_err("pfkey: Unknown SA event %d\n", c->event);
break;
}
return 0;
}
static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
if (xp && xp->type != XFRM_POLICY_TYPE_MAIN)
return 0;
switch (c->event) {
case XFRM_MSG_POLEXPIRE:
return key_notify_policy_expire(xp, c);
case XFRM_MSG_DELPOLICY:
case XFRM_MSG_NEWPOLICY:
case XFRM_MSG_UPDPOLICY:
return key_notify_policy(xp, dir, c);
case XFRM_MSG_FLUSHPOLICY:
if (c->data.type != XFRM_POLICY_TYPE_MAIN)
break;
return key_notify_policy_flush(c);
default:
pr_err("pfkey: Unknown policy event %d\n", c->event);
break;
}
return 0;
}
static u32 get_acqseq(void)
{
u32 res;
static atomic_t acqseq;
do {
res = atomic_inc_return(&acqseq);
} while (!res);
return res;
}
static bool pfkey_is_alive(const struct km_event *c)
{
struct netns_pfkey *net_pfkey = net_generic(c->net, pfkey_net_id);
struct sock *sk;
bool is_alive = false;
rcu_read_lock();
sk_for_each_rcu(sk, &net_pfkey->table) {
if (pfkey_sk(sk)->registered) {
is_alive = true;
break;
}
}
rcu_read_unlock();
return is_alive;
}
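/* Build and broadcast an SADB_ACQUIRE to registered sockets, carrying the
 * state's addresses, a policy extension referencing @xp and the algorithm
 * proposals, plus an optional security context.
 */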
static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
struct sadb_address *addr;
struct sadb_x_policy *pol;
int sockaddr_size;
int size;
struct sadb_x_sec_ctx *sec_ctx;
struct xfrm_sec_ctx *xfrm_ctx;
int ctx_size = 0;
int alg_size = 0;
sockaddr_size = pfkey_sockaddr_size(x->props.family);
if (!sockaddr_size)
return -EINVAL;
size = sizeof(struct sadb_msg) +
(sizeof(struct sadb_address) * 2) +
(sockaddr_size * 2) +
sizeof(struct sadb_x_policy);
if (x->id.proto == IPPROTO_AH)
alg_size = count_ah_combs(t);
else if (x->id.proto == IPPROTO_ESP)
alg_size = count_esp_combs(t);
if ((xfrm_ctx = x->security)) {
ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
}
skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
hdr = skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_type = SADB_ACQUIRE;
hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
hdr->sadb_msg_len = size / sizeof(uint64_t);
hdr->sadb_msg_errno = 0;
hdr->sadb_msg_reserved = 0;
hdr->sadb_msg_seq = x->km.seq = get_acqseq();
hdr->sadb_msg_pid = 0;
/* src address */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
addr->sadb_address_proto = 0;
addr->sadb_address_reserved = 0;
addr->sadb_address_prefixlen =
pfkey_sockaddr_fill(&x->props.saddr, 0,
(struct sockaddr *) (addr + 1),
x->props.family);
if (!addr->sadb_address_prefixlen)
BUG();
/* dst address */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
addr->sadb_address_proto = 0;
addr->sadb_address_reserved = 0;
addr->sadb_address_prefixlen =
pfkey_sockaddr_fill(&x->id.daddr, 0,
(struct sockaddr *) (addr + 1),
x->props.family);
if (!addr->sadb_address_prefixlen)
BUG();
pol = skb_put(skb, sizeof(struct sadb_x_policy));
pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = xp->index;
pol->sadb_x_policy_priority = xp->priority;
/* Set sadb_comb's. */
alg_size = 0;
if (x->id.proto == IPPROTO_AH)
alg_size = dump_ah_combs(skb, t);
else if (x->id.proto == IPPROTO_ESP)
alg_size = dump_esp_combs(skb, t);
hdr->sadb_msg_len += alg_size / 8;
/* security context */
if (xfrm_ctx) {
sec_ctx = skb_put(skb,
sizeof(struct sadb_x_sec_ctx) + ctx_size);
sec_ctx->sadb_x_sec_len =
(sizeof(struct sadb_x_sec_ctx) + ctx_size) / sizeof(uint64_t);
sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX;
sec_ctx->sadb_x_ctx_doi = xfrm_ctx->ctx_doi;
sec_ctx->sadb_x_ctx_alg = xfrm_ctx->ctx_alg;
sec_ctx->sadb_x_ctx_len = xfrm_ctx->ctx_len;
memcpy(sec_ctx + 1, xfrm_ctx->ctx_str,
xfrm_ctx->ctx_len);
}
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
}
static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
u8 *data, int len, int *dir)
{
struct net *net = sock_net(sk);
struct xfrm_policy *xp;
struct sadb_x_policy *pol = (struct sadb_x_policy*)data;
struct sadb_x_sec_ctx *sec_ctx;
switch (sk->sk_family) {
case AF_INET:
if (opt != IP_IPSEC_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (opt != IPV6_IPSEC_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#endif
default:
*dir = -EINVAL;
return NULL;
}
*dir = -EINVAL;
if (len < sizeof(struct sadb_x_policy) ||
pol->sadb_x_policy_len*8 > len ||
pol->sadb_x_policy_type > IPSEC_POLICY_BYPASS ||
(!pol->sadb_x_policy_dir || pol->sadb_x_policy_dir > IPSEC_DIR_OUTBOUND))
return NULL;
xp = xfrm_policy_alloc(net, GFP_ATOMIC);
if (xp == NULL) {
*dir = -ENOBUFS;
return NULL;
}
xp->action = (pol->sadb_x_policy_type == IPSEC_POLICY_DISCARD ?
XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);
xp->lft.soft_byte_limit = XFRM_INF;
xp->lft.hard_byte_limit = XFRM_INF;
xp->lft.soft_packet_limit = XFRM_INF;
xp->lft.hard_packet_limit = XFRM_INF;
xp->family = sk->sk_family;
xp->xfrm_nr = 0;
if (pol->sadb_x_policy_type == IPSEC_POLICY_IPSEC &&
(*dir = parse_ipsecrequests(xp, pol)) < 0)
goto out;
/* security context too */
if (len >= (pol->sadb_x_policy_len*8 +
sizeof(struct sadb_x_sec_ctx))) {
char *p = (char *)pol;
struct xfrm_user_sec_ctx *uctx;
p += pol->sadb_x_policy_len*8;
sec_ctx = (struct sadb_x_sec_ctx *)p;
if (len < pol->sadb_x_policy_len*8 +
sec_ctx->sadb_x_sec_len*8) {
*dir = -EINVAL;
goto out;
}
if ((*dir = verify_sec_ctx_len(p)))
goto out;
uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC);
*dir = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC);
kfree(uctx);
if (*dir)
goto out;
}
*dir = pol->sadb_x_policy_dir-1;
return xp;
out:
xp->walk.dead = 1;
xfrm_policy_destroy(xp);
return NULL;
}
static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
struct sadb_sa *sa;
struct sadb_address *addr;
struct sadb_x_nat_t_port *n_port;
int sockaddr_size;
int size;
__u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0);
struct xfrm_encap_tmpl *natt = NULL;
sockaddr_size = pfkey_sockaddr_size(x->props.family);
if (!sockaddr_size)
return -EINVAL;
if (!satype)
return -EINVAL;
if (!x->encap)
return -EINVAL;
natt = x->encap;
/* Build an SADB_X_NAT_T_NEW_MAPPING message:
*
* HDR | SA | ADDRESS_SRC (old addr) | NAT_T_SPORT (old port) |
* ADDRESS_DST (new addr) | NAT_T_DPORT (new port)
*/
size = sizeof(struct sadb_msg) +
sizeof(struct sadb_sa) +
(sizeof(struct sadb_address) * 2) +
(sockaddr_size * 2) +
(sizeof(struct sadb_x_nat_t_port) * 2);
skb = alloc_skb(size + 16, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
hdr = skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_type = SADB_X_NAT_T_NEW_MAPPING;
hdr->sadb_msg_satype = satype;
hdr->sadb_msg_len = size / sizeof(uint64_t);
hdr->sadb_msg_errno = 0;
hdr->sadb_msg_reserved = 0;
hdr->sadb_msg_seq = x->km.seq;
hdr->sadb_msg_pid = 0;
/* SA */
sa = skb_put(skb, sizeof(struct sadb_sa));
sa->sadb_sa_len = sizeof(struct sadb_sa)/sizeof(uint64_t);
sa->sadb_sa_exttype = SADB_EXT_SA;
sa->sadb_sa_spi = x->id.spi;
sa->sadb_sa_replay = 0;
sa->sadb_sa_state = 0;
sa->sadb_sa_auth = 0;
sa->sadb_sa_encrypt = 0;
sa->sadb_sa_flags = 0;
/* ADDRESS_SRC (old addr) */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
addr->sadb_address_proto = 0;
addr->sadb_address_reserved = 0;
addr->sadb_address_prefixlen =
pfkey_sockaddr_fill(&x->props.saddr, 0,
(struct sockaddr *) (addr + 1),
x->props.family);
if (!addr->sadb_address_prefixlen)
BUG();
/* NAT_T_SPORT (old port) */
n_port = skb_put(skb, sizeof(*n_port));
n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_SPORT;
n_port->sadb_x_nat_t_port_port = natt->encap_sport;
n_port->sadb_x_nat_t_port_reserved = 0;
/* ADDRESS_DST (new addr) */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
addr->sadb_address_proto = 0;
addr->sadb_address_reserved = 0;
addr->sadb_address_prefixlen =
pfkey_sockaddr_fill(ipaddr, 0,
(struct sockaddr *) (addr + 1),
x->props.family);
if (!addr->sadb_address_prefixlen)
BUG();
/* NAT_T_DPORT (new port) */
n_port = skb_put(skb, sizeof(*n_port));
n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_DPORT;
n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0;
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
}
#ifdef CONFIG_NET_KEY_MIGRATE
static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
const struct xfrm_selector *sel)
{
struct sadb_address *addr;
addr = skb_put(skb, sizeof(struct sadb_address) + sasize);
addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
addr->sadb_address_exttype = type;
addr->sadb_address_proto = sel->proto;
addr->sadb_address_reserved = 0;
switch (type) {
case SADB_EXT_ADDRESS_SRC:
addr->sadb_address_prefixlen = sel->prefixlen_s;
pfkey_sockaddr_fill(&sel->saddr, 0,
(struct sockaddr *)(addr + 1),
sel->family);
break;
case SADB_EXT_ADDRESS_DST:
addr->sadb_address_prefixlen = sel->prefixlen_d;
pfkey_sockaddr_fill(&sel->daddr, 0,
(struct sockaddr *)(addr + 1),
sel->family);
break;
default:
return -EINVAL;
}
return 0;
}
static int set_sadb_kmaddress(struct sk_buff *skb, const struct xfrm_kmaddress *k)
{
struct sadb_x_kmaddress *kma;
u8 *sa;
int family = k->family;
int socklen = pfkey_sockaddr_len(family);
int size_req;
size_req = (sizeof(struct sadb_x_kmaddress) +
pfkey_sockaddr_pair_size(family));
kma = skb_put_zero(skb, size_req);
kma->sadb_x_kmaddress_len = size_req / 8;
kma->sadb_x_kmaddress_exttype = SADB_X_EXT_KMADDRESS;
kma->sadb_x_kmaddress_reserved = k->reserved;
sa = (u8 *)(kma + 1);
if (!pfkey_sockaddr_fill(&k->local, 0, (struct sockaddr *)sa, family) ||
!pfkey_sockaddr_fill(&k->remote, 0, (struct sockaddr *)(sa+socklen), family))
return -EINVAL;
return 0;
}
static int set_ipsecrequest(struct sk_buff *skb,
uint8_t proto, uint8_t mode, int level,
uint32_t reqid, uint8_t family,
const xfrm_address_t *src, const xfrm_address_t *dst)
{
struct sadb_x_ipsecrequest *rq;
u8 *sa;
int socklen = pfkey_sockaddr_len(family);
int size_req;
size_req = sizeof(struct sadb_x_ipsecrequest) +
pfkey_sockaddr_pair_size(family);
rq = skb_put_zero(skb, size_req);
rq->sadb_x_ipsecrequest_len = size_req;
rq->sadb_x_ipsecrequest_proto = proto;
rq->sadb_x_ipsecrequest_mode = mode;
rq->sadb_x_ipsecrequest_level = level;
rq->sadb_x_ipsecrequest_reqid = reqid;
sa = (u8 *) (rq + 1);
if (!pfkey_sockaddr_fill(src, 0, (struct sockaddr *)sa, family) ||
!pfkey_sockaddr_fill(dst, 0, (struct sockaddr *)(sa + socklen), family))
return -EINVAL;
return 0;
}
#endif
#ifdef CONFIG_NET_KEY_MIGRATE
static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_bundles,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap)
{
int i;
int sasize_sel;
int size = 0;
int size_pol = 0;
struct sk_buff *skb;
struct sadb_msg *hdr;
struct sadb_x_policy *pol;
const struct xfrm_migrate *mp;
if (type != XFRM_POLICY_TYPE_MAIN)
return 0;
if (num_bundles <= 0 || num_bundles > XFRM_MAX_DEPTH)
return -EINVAL;
if (k != NULL) {
/* addresses for KM */
size += PFKEY_ALIGN8(sizeof(struct sadb_x_kmaddress) +
pfkey_sockaddr_pair_size(k->family));
}
/* selector */
sasize_sel = pfkey_sockaddr_size(sel->family);
if (!sasize_sel)
return -EINVAL;
size += (sizeof(struct sadb_address) + sasize_sel) * 2;
/* policy info */
size_pol += sizeof(struct sadb_x_policy);
/* ipsecrequests */
for (i = 0, mp = m; i < num_bundles; i++, mp++) {
/* old locator pair */
size_pol += sizeof(struct sadb_x_ipsecrequest) +
pfkey_sockaddr_pair_size(mp->old_family);
/* new locator pair */
size_pol += sizeof(struct sadb_x_ipsecrequest) +
pfkey_sockaddr_pair_size(mp->new_family);
}
size += sizeof(struct sadb_msg) + size_pol;
/* alloc buffer */
skb = alloc_skb(size, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
hdr = skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_type = SADB_X_MIGRATE;
hdr->sadb_msg_satype = pfkey_proto2satype(m->proto);
hdr->sadb_msg_len = size / 8;
hdr->sadb_msg_errno = 0;
hdr->sadb_msg_reserved = 0;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_pid = 0;
/* Addresses to be used by KM for negotiation, if ext is available */
if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
goto err;
/* selector src */
set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
/* selector dst */
set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_DST, sel);
/* policy information */
pol = skb_put(skb, sizeof(struct sadb_x_policy));
pol->sadb_x_policy_len = size_pol / 8;
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
pol->sadb_x_policy_dir = dir + 1;
pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = 0;
pol->sadb_x_policy_priority = 0;
for (i = 0, mp = m; i < num_bundles; i++, mp++) {
/* old ipsecrequest */
int mode = pfkey_mode_from_xfrm(mp->mode);
if (mode < 0)
goto err;
if (set_ipsecrequest(skb, mp->proto, mode,
(mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
mp->reqid, mp->old_family,
&mp->old_saddr, &mp->old_daddr) < 0)
goto err;
/* new ipsecrequest */
if (set_ipsecrequest(skb, mp->proto, mode,
(mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
mp->reqid, mp->new_family,
&mp->new_saddr, &mp->new_daddr) < 0)
goto err;
}
/* broadcast migrate message to sockets */
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
return 0;
err:
kfree_skb(skb);
return -EINVAL;
}
#else
static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_bundles,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap)
{
return -ENOPROTOOPT;
}
#endif
static int pfkey_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sk_buff *skb = NULL;
struct sadb_msg *hdr = NULL;
int err;
struct net *net = sock_net(sk);
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB)
goto out;
err = -EMSGSIZE;
if ((unsigned int)len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
skb = alloc_skb(len, GFP_KERNEL);
if (skb == NULL)
goto out;
err = -EFAULT;
if (memcpy_from_msg(skb_put(skb,len), msg, len))
goto out;
hdr = pfkey_get_base_msg(skb, &err);
if (!hdr)
goto out;
mutex_lock(&net->xfrm.xfrm_cfg_mutex);
err = pfkey_process(sk, skb, hdr);
mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
out:
if (err && hdr && pfkey_error(hdr, err, sk) == 0)
err = 0;
kfree_skb(skb);
return err ? : len;
}
static int pfkey_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
struct pfkey_sock *pfk = pfkey_sk(sk);
struct sk_buff *skb;
int copied, err;
err = -EINVAL;
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
goto out;
skb = skb_recv_datagram(sk, flags, &err);
if (skb == NULL)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(skb);
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free;
sock_recv_cmsgs(msg, sk, skb);
err = (flags & MSG_TRUNC) ? skb->len : copied;
if (pfk->dump.dump != NULL &&
3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
pfkey_do_dump(pfk);
out_free:
skb_free_datagram(sk, skb);
out:
return err;
}
static const struct proto_ops pfkey_ops = {
.family = PF_KEY,
.owner = THIS_MODULE,
/* Operations that make no sense on pfkey sockets. */
.bind = sock_no_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
/* Now the operations that really occur. */
.release = pfkey_release,
.poll = datagram_poll,
.sendmsg = pfkey_sendmsg,
.recvmsg = pfkey_recvmsg,
};
static const struct net_proto_family pfkey_family_ops = {
.family = PF_KEY,
.create = pfkey_create,
.owner = THIS_MODULE,
};
#ifdef CONFIG_PROC_FS
static int pfkey_seq_show(struct seq_file *f, void *v)
{
struct sock *s = sk_entry(v);
if (v == SEQ_START_TOKEN)
		seq_printf(f, "sk       RefCnt Rmem   Wmem   User   Inode\n");
else
seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n",
s,
refcount_read(&s->sk_refcnt),
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
sock_i_ino(s)
);
return 0;
}
static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
__acquires(rcu)
{
struct net *net = seq_file_net(f);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
rcu_read_lock();
return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos);
}
static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
{
struct net *net = seq_file_net(f);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
return seq_hlist_next_rcu(v, &net_pfkey->table, ppos);
}
static void pfkey_seq_stop(struct seq_file *f, void *v)
__releases(rcu)
{
rcu_read_unlock();
}
static const struct seq_operations pfkey_seq_ops = {
.start = pfkey_seq_start,
.next = pfkey_seq_next,
.stop = pfkey_seq_stop,
.show = pfkey_seq_show,
};
static int __net_init pfkey_init_proc(struct net *net)
{
struct proc_dir_entry *e;
e = proc_create_net("pfkey", 0, net->proc_net, &pfkey_seq_ops,
sizeof(struct seq_net_private));
if (e == NULL)
return -ENOMEM;
return 0;
}
static void __net_exit pfkey_exit_proc(struct net *net)
{
remove_proc_entry("pfkey", net->proc_net);
}
#else
static inline int pfkey_init_proc(struct net *net)
{
return 0;
}
static inline void pfkey_exit_proc(struct net *net)
{
}
#endif
static struct xfrm_mgr pfkeyv2_mgr =
{
.notify = pfkey_send_notify,
.acquire = pfkey_send_acquire,
.compile_policy = pfkey_compile_policy,
.new_mapping = pfkey_send_new_mapping,
.notify_policy = pfkey_send_policy_notify,
.migrate = pfkey_send_migrate,
.is_alive = pfkey_is_alive,
};
static int __net_init pfkey_net_init(struct net *net)
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
int rv;
INIT_HLIST_HEAD(&net_pfkey->table);
atomic_set(&net_pfkey->socks_nr, 0);
rv = pfkey_init_proc(net);
return rv;
}
static void __net_exit pfkey_net_exit(struct net *net)
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
pfkey_exit_proc(net);
WARN_ON(!hlist_empty(&net_pfkey->table));
}
static struct pernet_operations pfkey_net_ops = {
.init = pfkey_net_init,
.exit = pfkey_net_exit,
.id = &pfkey_net_id,
.size = sizeof(struct netns_pfkey),
};
static void __exit ipsec_pfkey_exit(void)
{
xfrm_unregister_km(&pfkeyv2_mgr);
sock_unregister(PF_KEY);
unregister_pernet_subsys(&pfkey_net_ops);
proto_unregister(&key_proto);
}
static int __init ipsec_pfkey_init(void)
{
int err = proto_register(&key_proto, 0);
if (err != 0)
goto out;
err = register_pernet_subsys(&pfkey_net_ops);
if (err != 0)
goto out_unregister_key_proto;
err = sock_register(&pfkey_family_ops);
if (err != 0)
goto out_unregister_pernet;
xfrm_register_km(&pfkeyv2_mgr);
out:
return err;
out_unregister_pernet:
unregister_pernet_subsys(&pfkey_net_ops);
out_unregister_key_proto:
proto_unregister(&key_proto);
goto out;
}
module_init(ipsec_pfkey_init);
module_exit(ipsec_pfkey_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KEY);
| linux-master | net/key/af_key.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - neighbour table
 * implementation.
 *
 * This is currently based on a simple, flat list of neighbours, with no
 * caching. The number of neighbours should stay fairly small, so the lookup
 * cost is small.
*
* Copyright (c) 2021 Code Construct
* Copyright (c) 2021 Google
*/
#include <linux/idr.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/netlink.h>
#include <net/sock.h>
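/* Add a neighbour entry for (device, EID) holding the link-layer address;
 * fails with -EEXIST if a matching entry already exists.
 */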
static int mctp_neigh_add(struct mctp_dev *mdev, mctp_eid_t eid,
enum mctp_neigh_source source,
size_t lladdr_len, const void *lladdr)
{
struct net *net = dev_net(mdev->dev);
struct mctp_neigh *neigh;
int rc;
mutex_lock(&net->mctp.neigh_lock);
if (mctp_neigh_lookup(mdev, eid, NULL) == 0) {
rc = -EEXIST;
goto out;
}
if (lladdr_len > sizeof(neigh->ha)) {
rc = -EINVAL;
goto out;
}
neigh = kzalloc(sizeof(*neigh), GFP_KERNEL);
if (!neigh) {
rc = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&neigh->list);
neigh->dev = mdev;
mctp_dev_hold(neigh->dev);
neigh->eid = eid;
neigh->source = source;
memcpy(neigh->ha, lladdr, lladdr_len);
list_add_rcu(&neigh->list, &net->mctp.neighbours);
rc = 0;
out:
mutex_unlock(&net->mctp.neigh_lock);
return rc;
}
static void __mctp_neigh_free(struct rcu_head *rcu)
{
struct mctp_neigh *neigh = container_of(rcu, struct mctp_neigh, rcu);
mctp_dev_put(neigh->dev);
kfree(neigh);
}
/* Removes all neighbour entries referring to a device */
void mctp_neigh_remove_dev(struct mctp_dev *mdev)
{
struct net *net = dev_net(mdev->dev);
struct mctp_neigh *neigh, *tmp;
mutex_lock(&net->mctp.neigh_lock);
list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) {
if (neigh->dev == mdev) {
list_del_rcu(&neigh->list);
/* TODO: immediate RTM_DELNEIGH */
call_rcu(&neigh->rcu, __mctp_neigh_free);
}
}
mutex_unlock(&net->mctp.neigh_lock);
}
static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid,
enum mctp_neigh_source source)
{
struct net *net = dev_net(mdev->dev);
struct mctp_neigh *neigh, *tmp;
bool dropped = false;
mutex_lock(&net->mctp.neigh_lock);
list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) {
if (neigh->dev == mdev && neigh->eid == eid &&
neigh->source == source) {
list_del_rcu(&neigh->list);
/* TODO: immediate RTM_DELNEIGH */
call_rcu(&neigh->rcu, __mctp_neigh_free);
dropped = true;
}
}
mutex_unlock(&net->mctp.neigh_lock);
return dropped ? 0 : -ENOENT;
}
static const struct nla_policy nd_mctp_policy[NDA_MAX + 1] = {
[NDA_DST] = { .type = NLA_U8 },
[NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
};
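/* RTM_NEWNEIGH handler: validate NDA_DST and NDA_LLADDR and add a static
 * neighbour on the interface named by ndm_ifindex.
 */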
static int mctp_rtm_newneigh(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct net_device *dev;
struct mctp_dev *mdev;
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX + 1];
int rc;
mctp_eid_t eid;
void *lladdr;
int lladdr_len;
rc = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, nd_mctp_policy,
extack);
if (rc < 0) {
NL_SET_ERR_MSG(extack, "lladdr too large?");
return rc;
}
if (!tb[NDA_DST]) {
NL_SET_ERR_MSG(extack, "Neighbour EID must be specified");
return -EINVAL;
}
if (!tb[NDA_LLADDR]) {
NL_SET_ERR_MSG(extack, "Neighbour lladdr must be specified");
return -EINVAL;
}
eid = nla_get_u8(tb[NDA_DST]);
if (!mctp_address_unicast(eid)) {
NL_SET_ERR_MSG(extack, "Invalid neighbour EID");
return -EINVAL;
}
lladdr = nla_data(tb[NDA_LLADDR]);
lladdr_len = nla_len(tb[NDA_LLADDR]);
ndm = nlmsg_data(nlh);
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (!dev)
return -ENODEV;
mdev = mctp_dev_get_rtnl(dev);
if (!mdev)
return -ENODEV;
if (lladdr_len != dev->addr_len) {
NL_SET_ERR_MSG(extack, "Wrong lladdr length");
return -EINVAL;
}
return mctp_neigh_add(mdev, eid, MCTP_NEIGH_STATIC,
lladdr_len, lladdr);
}
static int mctp_rtm_delneigh(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NDA_MAX + 1];
struct net_device *dev;
struct mctp_dev *mdev;
struct ndmsg *ndm;
int rc;
mctp_eid_t eid;
rc = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, nd_mctp_policy,
extack);
if (rc < 0) {
NL_SET_ERR_MSG(extack, "incorrect format");
return rc;
}
if (!tb[NDA_DST]) {
NL_SET_ERR_MSG(extack, "Neighbour EID must be specified");
return -EINVAL;
}
eid = nla_get_u8(tb[NDA_DST]);
ndm = nlmsg_data(nlh);
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (!dev)
return -ENODEV;
mdev = mctp_dev_get_rtnl(dev);
if (!mdev)
return -ENODEV;
return mctp_neigh_remove(mdev, eid, MCTP_NEIGH_STATIC);
}
static int mctp_fill_neigh(struct sk_buff *skb, u32 portid, u32 seq, int event,
unsigned int flags, struct mctp_neigh *neigh)
{
struct net_device *dev = neigh->dev->dev;
struct nlmsghdr *nlh;
struct ndmsg *hdr;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
if (!nlh)
return -EMSGSIZE;
hdr = nlmsg_data(nlh);
hdr->ndm_family = AF_MCTP;
hdr->ndm_ifindex = dev->ifindex;
hdr->ndm_state = 0; // TODO other state bits?
if (neigh->source == MCTP_NEIGH_STATIC)
hdr->ndm_state |= NUD_PERMANENT;
hdr->ndm_flags = 0;
hdr->ndm_type = RTN_UNICAST; // TODO: is loopback RTN_LOCAL?
if (nla_put_u8(skb, NDA_DST, neigh->eid))
goto cancel;
if (nla_put(skb, NDA_LLADDR, dev->addr_len, neigh->ha))
goto cancel;
nlmsg_end(skb, nlh);
return 0;
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int mctp_rtm_getneigh(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int rc, idx, req_ifindex;
struct mctp_neigh *neigh;
struct ndmsg *ndmsg;
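	/* dump position is carried between callback invocations in cb->ctx */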
struct {
int idx;
} *cbctx = (void *)cb->ctx;
ndmsg = nlmsg_data(cb->nlh);
req_ifindex = ndmsg->ndm_ifindex;
idx = 0;
rcu_read_lock();
list_for_each_entry_rcu(neigh, &net->mctp.neighbours, list) {
if (idx < cbctx->idx)
goto cont;
rc = 0;
if (req_ifindex == 0 || req_ifindex == neigh->dev->dev->ifindex)
rc = mctp_fill_neigh(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH, NLM_F_MULTI, neigh);
if (rc)
break;
cont:
idx++;
}
rcu_read_unlock();
cbctx->idx = idx;
return skb->len;
}
int mctp_neigh_lookup(struct mctp_dev *mdev, mctp_eid_t eid, void *ret_hwaddr)
{
struct net *net = dev_net(mdev->dev);
struct mctp_neigh *neigh;
int rc = -EHOSTUNREACH; // TODO: or ENOENT?
rcu_read_lock();
list_for_each_entry_rcu(neigh, &net->mctp.neighbours, list) {
if (mdev == neigh->dev && eid == neigh->eid) {
if (ret_hwaddr)
memcpy(ret_hwaddr, neigh->ha,
sizeof(neigh->ha));
rc = 0;
break;
}
}
rcu_read_unlock();
return rc;
}
/* namespace registration */
static int __net_init mctp_neigh_net_init(struct net *net)
{
struct netns_mctp *ns = &net->mctp;
INIT_LIST_HEAD(&ns->neighbours);
mutex_init(&ns->neigh_lock);
return 0;
}
static void __net_exit mctp_neigh_net_exit(struct net *net)
{
struct netns_mctp *ns = &net->mctp;
struct mctp_neigh *neigh;
list_for_each_entry(neigh, &ns->neighbours, list)
call_rcu(&neigh->rcu, __mctp_neigh_free);
}
/* net namespace implementation */
static struct pernet_operations mctp_net_ops = {
.init = mctp_neigh_net_init,
.exit = mctp_neigh_net_exit,
};
int __init mctp_neigh_init(void)
{
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWNEIGH,
mctp_rtm_newneigh, NULL, 0);
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELNEIGH,
mctp_rtm_delneigh, NULL, 0);
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETNEIGH,
NULL, mctp_rtm_getneigh, 0);
return register_pernet_subsys(&mctp_net_ops);
}
void __exit mctp_neigh_exit(void)
{
unregister_pernet_subsys(&mctp_net_ops);
rtnl_unregister(PF_MCTP, RTM_GETNEIGH);
rtnl_unregister(PF_MCTP, RTM_DELNEIGH);
rtnl_unregister(PF_MCTP, RTM_NEWNEIGH);
}
| linux-master | net/mctp/neigh.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Management Component Transport Protocol (MCTP)
*
* Copyright (c) 2021 Code Construct
* Copyright (c) 2021 Google
*/
#include <linux/compat.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/mctp.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/sock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mctp.h>
/* socket implementation */
static void mctp_sk_expire_keys(struct timer_list *timer);
static int mctp_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
sock->sk = NULL;
sk->sk_prot->close(sk, 0);
}
return 0;
}
/* Generic sockaddr checks, padding checks only so far */
static bool mctp_sockaddr_is_ok(const struct sockaddr_mctp *addr)
{
return !addr->__smctp_pad0 && !addr->__smctp_pad1;
}
static bool mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext *addr)
{
return !addr->__smctp_pad0[0] &&
!addr->__smctp_pad0[1] &&
!addr->__smctp_pad0[2];
}
static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
struct sock *sk = sock->sk;
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
struct sockaddr_mctp *smctp;
int rc;
if (addrlen < sizeof(*smctp))
return -EINVAL;
if (addr->sa_family != AF_MCTP)
return -EAFNOSUPPORT;
if (!capable(CAP_NET_BIND_SERVICE))
return -EACCES;
/* it's a valid sockaddr for MCTP, cast and do protocol checks */
smctp = (struct sockaddr_mctp *)addr;
if (!mctp_sockaddr_is_ok(smctp))
return -EINVAL;
lock_sock(sk);
/* TODO: allow rebind */
if (sk_hashed(sk)) {
rc = -EADDRINUSE;
goto out_release;
}
msk->bind_net = smctp->smctp_network;
msk->bind_addr = smctp->smctp_addr.s_addr;
msk->bind_type = smctp->smctp_type & 0x7f; /* ignore the IC bit */
rc = sk->sk_prot->hash(sk);
out_release:
release_sock(sk);
return rc;
}
static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
int rc, addrlen = msg->msg_namelen;
struct sock *sk = sock->sk;
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
struct mctp_skb_cb *cb;
struct mctp_route *rt;
struct sk_buff *skb = NULL;
int hlen;
if (addr) {
const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER |
MCTP_TAG_PREALLOC;
if (addrlen < sizeof(struct sockaddr_mctp))
return -EINVAL;
if (addr->smctp_family != AF_MCTP)
return -EINVAL;
if (!mctp_sockaddr_is_ok(addr))
return -EINVAL;
if (addr->smctp_tag & ~tagbits)
return -EINVAL;
/* can't preallocate a non-owned tag */
if (addr->smctp_tag & MCTP_TAG_PREALLOC &&
!(addr->smctp_tag & MCTP_TAG_OWNER))
return -EINVAL;
} else {
/* TODO: connect()ed sockets */
return -EDESTADDRREQ;
}
if (!capable(CAP_NET_RAW))
return -EACCES;
if (addr->smctp_network == MCTP_NET_ANY)
addr->smctp_network = mctp_default_net(sock_net(sk));
/* direct addressing */
if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
extaddr, msg->msg_name);
struct net_device *dev;
rc = -EINVAL;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
/* check for correct halen */
if (dev && extaddr->smctp_halen == dev->addr_len) {
hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
rc = 0;
}
rcu_read_unlock();
if (rc)
goto err_free;
rt = NULL;
} else {
rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
addr->smctp_addr.s_addr);
if (!rt) {
rc = -EHOSTUNREACH;
goto err_free;
}
hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
}
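	/* allocate for link-layer and MCTP headers, one type byte, and the payload */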
skb = sock_alloc_send_skb(sk, hlen + 1 + len,
msg->msg_flags & MSG_DONTWAIT, &rc);
if (!skb)
return rc;
skb_reserve(skb, hlen);
	/* set type as first byte in payload */
*(u8 *)skb_put(skb, 1) = addr->smctp_type;
rc = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
if (rc < 0)
goto err_free;
/* set up cb */
cb = __mctp_cb(skb);
cb->net = addr->smctp_network;
if (!rt) {
/* fill extended address in cb */
DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
extaddr, msg->msg_name);
if (!mctp_sockaddr_ext_is_ok(extaddr) ||
extaddr->smctp_halen > sizeof(cb->haddr)) {
rc = -EINVAL;
goto err_free;
}
cb->ifindex = extaddr->smctp_ifindex;
/* smctp_halen is checked above */
cb->halen = extaddr->smctp_halen;
memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
}
rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
addr->smctp_tag);
return rc ? : len;
err_free:
kfree_skb(skb);
return rc;
}
static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
struct sock *sk = sock->sk;
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
struct sk_buff *skb;
size_t msglen;
u8 type;
int rc;
if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
return -EOPNOTSUPP;
skb = skb_recv_datagram(sk, flags, &rc);
if (!skb)
return rc;
if (!skb->len) {
rc = 0;
goto out_free;
}
/* extract message type, remove from data */
type = *((u8 *)skb->data);
msglen = skb->len - 1;
if (len < msglen)
msg->msg_flags |= MSG_TRUNC;
else
len = msglen;
rc = skb_copy_datagram_msg(skb, 1, msg, len);
if (rc < 0)
goto out_free;
sock_recv_cmsgs(msg, sk, skb);
if (addr) {
struct mctp_skb_cb *cb = mctp_cb(skb);
/* TODO: expand mctp_skb_cb for header fields? */
struct mctp_hdr *hdr = mctp_hdr(skb);
addr = msg->msg_name;
addr->smctp_family = AF_MCTP;
addr->__smctp_pad0 = 0;
addr->smctp_network = cb->net;
addr->smctp_addr.s_addr = hdr->src;
addr->smctp_type = type;
addr->smctp_tag = hdr->flags_seq_tag &
(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
addr->__smctp_pad1 = 0;
msg->msg_namelen = sizeof(*addr);
if (msk->addr_ext) {
DECLARE_SOCKADDR(struct sockaddr_mctp_ext *, ae,
msg->msg_name);
msg->msg_namelen = sizeof(*ae);
ae->smctp_ifindex = cb->ifindex;
ae->smctp_halen = cb->halen;
memset(ae->__smctp_pad0, 0x0, sizeof(ae->__smctp_pad0));
memset(ae->smctp_haddr, 0x0, sizeof(ae->smctp_haddr));
memcpy(ae->smctp_haddr, cb->haddr, cb->halen);
}
}
rc = len;
if (flags & MSG_TRUNC)
rc = msglen;
out_free:
skb_free_datagram(sk, skb);
return rc;
}
/* We're done with the key; invalidate, stop reassembly, and remove from lists.
*/
static void __mctp_key_remove(struct mctp_sk_key *key, struct net *net,
unsigned long flags, unsigned long reason)
__releases(&key->lock)
__must_hold(&net->mctp.keys_lock)
{
struct sk_buff *skb;
trace_mctp_key_release(key, reason);
skb = key->reasm_head;
key->reasm_head = NULL;
key->reasm_dead = true;
key->valid = false;
mctp_dev_release_key(key->dev, key);
spin_unlock_irqrestore(&key->lock, flags);
if (!hlist_unhashed(&key->hlist)) {
hlist_del_init(&key->hlist);
hlist_del_init(&key->sklist);
/* unref for the lists */
mctp_key_unref(key);
}
kfree_skb(skb);
}
static int mctp_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
int val;
if (level != SOL_MCTP)
return -EINVAL;
if (optname == MCTP_OPT_ADDR_EXT) {
if (optlen != sizeof(int))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(int)))
return -EFAULT;
msk->addr_ext = val;
return 0;
}
return -ENOPROTOOPT;
}
static int mctp_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
int len, val;
if (level != SOL_MCTP)
return -EINVAL;
if (get_user(len, optlen))
return -EFAULT;
if (optname == MCTP_OPT_ADDR_EXT) {
if (len != sizeof(int))
return -EINVAL;
val = !!msk->addr_ext;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
return -EINVAL;
}
static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
struct mctp_sk_key *key = NULL;
struct mctp_ioc_tag_ctl ctl;
unsigned long flags;
u8 tag;
if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
return -EFAULT;
if (ctl.tag)
return -EINVAL;
if (ctl.flags)
return -EINVAL;
key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY,
true, &tag);
if (IS_ERR(key))
return PTR_ERR(key);
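	/* report the allocated tag back to userspace, flagged as owned and preallocated */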
ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {
unsigned long fl2;
/* Unwind our key allocation: the keys list lock needs to be
* taken before the individual key locks, and we need a valid
* flags value (fl2) to pass to __mctp_key_remove, hence the
* second spin_lock_irqsave() rather than a plain spin_lock().
*/
spin_lock_irqsave(&net->mctp.keys_lock, flags);
spin_lock_irqsave(&key->lock, fl2);
__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
mctp_key_unref(key);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
return -EFAULT;
}
mctp_key_unref(key);
return 0;
}
static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
struct mctp_ioc_tag_ctl ctl;
unsigned long flags, fl2;
struct mctp_sk_key *key;
struct hlist_node *tmp;
int rc;
u8 tag;
if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
return -EFAULT;
if (ctl.flags)
return -EINVAL;
/* Must be a local tag, TO set, preallocated */
if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
return -EINVAL;
tag = ctl.tag & MCTP_TAG_MASK;
rc = -EINVAL;
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
/* we do an irqsave here, even though we know the irq state,
* so we have the flags to pass to __mctp_key_remove
*/
spin_lock_irqsave(&key->lock, fl2);
if (key->manual_alloc &&
ctl.peer_addr == key->peer_addr &&
tag == key->tag) {
__mctp_key_remove(key, net, fl2,
MCTP_TRACE_KEY_DROPPED);
rc = 0;
} else {
spin_unlock_irqrestore(&key->lock, fl2);
}
}
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
return rc;
}
static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
switch (cmd) {
case SIOCMCTPALLOCTAG:
return mctp_ioctl_alloctag(msk, arg);
case SIOCMCTPDROPTAG:
return mctp_ioctl_droptag(msk, arg);
}
return -EINVAL;
}
#ifdef CONFIG_COMPAT
static int mctp_compat_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
void __user *argp = compat_ptr(arg);
switch (cmd) {
/* These have compatible ptr layouts */
case SIOCMCTPALLOCTAG:
case SIOCMCTPDROPTAG:
return mctp_ioctl(sock, cmd, (unsigned long)argp);
}
return -ENOIOCTLCMD;
}
#endif
static const struct proto_ops mctp_dgram_ops = {
.family = PF_MCTP,
.release = mctp_release,
.bind = mctp_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = datagram_poll,
.ioctl = mctp_ioctl,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = mctp_setsockopt,
.getsockopt = mctp_getsockopt,
.sendmsg = mctp_sendmsg,
.recvmsg = mctp_recvmsg,
.mmap = sock_no_mmap,
#ifdef CONFIG_COMPAT
.compat_ioctl = mctp_compat_ioctl,
#endif
};
static void mctp_sk_expire_keys(struct timer_list *timer)
{
struct mctp_sock *msk = container_of(timer, struct mctp_sock,
key_expiry);
struct net *net = sock_net(&msk->sk);
unsigned long next_expiry, flags, fl2;
struct mctp_sk_key *key;
struct hlist_node *tmp;
bool next_expiry_valid = false;
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
/* don't expire. manual_alloc is immutable, no locking
* required.
*/
if (key->manual_alloc)
continue;
spin_lock_irqsave(&key->lock, fl2);
if (!time_after_eq(key->expiry, jiffies)) {
__mctp_key_remove(key, net, fl2,
MCTP_TRACE_KEY_TIMEOUT);
continue;
}
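		/* still valid: track the earliest remaining expiry so the timer can be re-armed below */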
if (next_expiry_valid) {
if (time_before(key->expiry, next_expiry))
next_expiry = key->expiry;
} else {
next_expiry = key->expiry;
next_expiry_valid = true;
}
spin_unlock_irqrestore(&key->lock, fl2);
}
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
if (next_expiry_valid)
mod_timer(timer, next_expiry);
}
static int mctp_sk_init(struct sock *sk)
{
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
INIT_HLIST_HEAD(&msk->keys);
timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0);
return 0;
}
static void mctp_sk_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
}
static int mctp_sk_hash(struct sock *sk)
{
struct net *net = sock_net(sk);
mutex_lock(&net->mctp.bind_lock);
sk_add_node_rcu(sk, &net->mctp.binds);
mutex_unlock(&net->mctp.bind_lock);
return 0;
}
static void mctp_sk_unhash(struct sock *sk)
{
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
struct net *net = sock_net(sk);
unsigned long flags, fl2;
struct mctp_sk_key *key;
struct hlist_node *tmp;
/* remove from any type-based binds */
mutex_lock(&net->mctp.bind_lock);
sk_del_node_init_rcu(sk);
mutex_unlock(&net->mctp.bind_lock);
/* remove tag allocations */
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
spin_lock_irqsave(&key->lock, fl2);
__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
}
sock_set_flag(sk, SOCK_DEAD);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
	/* Since there are no more tag allocations (we have removed all of the
	 * keys), stop any pending expiry events. The timer cannot be re-queued
	 * as the sk is no longer observable.
	 */
del_timer_sync(&msk->key_expiry);
}
static void mctp_sk_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
}
static struct proto mctp_proto = {
.name = "MCTP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct mctp_sock),
.init = mctp_sk_init,
.close = mctp_sk_close,
.hash = mctp_sk_hash,
.unhash = mctp_sk_unhash,
};
static int mctp_pf_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
const struct proto_ops *ops;
struct proto *proto;
struct sock *sk;
int rc;
if (protocol)
return -EPROTONOSUPPORT;
/* only datagram sockets are supported */
if (sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
proto = &mctp_proto;
ops = &mctp_dgram_ops;
sock->state = SS_UNCONNECTED;
sock->ops = ops;
sk = sk_alloc(net, PF_MCTP, GFP_KERNEL, proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sk->sk_destruct = mctp_sk_destruct;
rc = 0;
if (sk->sk_prot->init)
rc = sk->sk_prot->init(sk);
if (rc)
goto err_sk_put;
return 0;
err_sk_put:
sock_orphan(sk);
sock_put(sk);
return rc;
}
static struct net_proto_family mctp_pf = {
.family = PF_MCTP,
.create = mctp_pf_create,
.owner = THIS_MODULE,
};
static __init int mctp_init(void)
{
int rc;
/* ensure our uapi tag definitions match the header format */
BUILD_BUG_ON(MCTP_TAG_OWNER != MCTP_HDR_FLAG_TO);
BUILD_BUG_ON(MCTP_TAG_MASK != MCTP_HDR_TAG_MASK);
pr_info("mctp: management component transport protocol core\n");
rc = sock_register(&mctp_pf);
if (rc)
return rc;
rc = proto_register(&mctp_proto, 0);
if (rc)
goto err_unreg_sock;
rc = mctp_routes_init();
if (rc)
goto err_unreg_proto;
rc = mctp_neigh_init();
if (rc)
goto err_unreg_routes;
mctp_device_init();
return 0;
err_unreg_routes:
mctp_routes_exit();
err_unreg_proto:
proto_unregister(&mctp_proto);
err_unreg_sock:
sock_unregister(PF_MCTP);
return rc;
}
static __exit void mctp_exit(void)
{
mctp_device_exit();
mctp_neigh_exit();
mctp_routes_exit();
proto_unregister(&mctp_proto);
sock_unregister(PF_MCTP);
}
subsys_initcall(mctp_init);
module_exit(mctp_exit);
MODULE_DESCRIPTION("MCTP core");
MODULE_AUTHOR("Jeremy Kerr <[email protected]>");
MODULE_ALIAS_NETPROTO(PF_MCTP);
| linux-master | net/mctp/af_mctp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Management Component Transport Protocol (MCTP) - device implementation.
*
* Copyright (c) 2021 Code Construct
* Copyright (c) 2021 Google
*/
#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <net/addrconf.h>
#include <net/netlink.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/sock.h>
struct mctp_dump_cb {
int h;
int idx;
size_t a_idx;
};
/* unlocked: caller must hold rcu_read_lock.
* Returned mctp_dev has its refcount incremented, or NULL if unset.
*/
struct mctp_dev *__mctp_dev_get(const struct net_device *dev)
{
struct mctp_dev *mdev = rcu_dereference(dev->mctp_ptr);
/* RCU guarantees that any mdev is still live.
* Zero refcount implies a pending free, return NULL.
*/
if (mdev)
if (!refcount_inc_not_zero(&mdev->refs))
return NULL;
return mdev;
}
/* Returned mctp_dev does not have its refcount incremented. The returned
 * pointer remains live while rtnl_lock is held, as that prevents
 * mctp_unregister() from running.
 */
struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev)
{
return rtnl_dereference(dev->mctp_ptr);
}
static int mctp_addrinfo_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+ nla_total_size(1) // IFA_LOCAL
+ nla_total_size(1) // IFA_ADDRESS
;
}
/* flag should be NLM_F_MULTI for dump calls */
static int mctp_fill_addrinfo(struct sk_buff *skb,
struct mctp_dev *mdev, mctp_eid_t eid,
int msg_type, u32 portid, u32 seq, int flag)
{
struct ifaddrmsg *hdr;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, portid, seq,
msg_type, sizeof(*hdr), flag);
if (!nlh)
return -EMSGSIZE;
hdr = nlmsg_data(nlh);
hdr->ifa_family = AF_MCTP;
hdr->ifa_prefixlen = 0;
hdr->ifa_flags = 0;
hdr->ifa_scope = 0;
hdr->ifa_index = mdev->dev->ifindex;
if (nla_put_u8(skb, IFA_LOCAL, eid))
goto cancel;
if (nla_put_u8(skb, IFA_ADDRESS, eid))
goto cancel;
nlmsg_end(skb, nlh);
return 0;
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb,
struct netlink_callback *cb)
{
struct mctp_dump_cb *mcb = (void *)cb->ctx;
u32 portid, seq;
int rc = 0;
portid = NETLINK_CB(cb->skb).portid;
seq = cb->nlh->nlmsg_seq;
for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) {
rc = mctp_fill_addrinfo(skb, mdev, mdev->addrs[mcb->a_idx],
RTM_NEWADDR, portid, seq, NLM_F_MULTI);
if (rc < 0)
break;
}
return rc;
}
static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct mctp_dump_cb *mcb = (void *)cb->ctx;
struct net *net = sock_net(skb->sk);
struct hlist_head *head;
struct net_device *dev;
struct ifaddrmsg *hdr;
struct mctp_dev *mdev;
int ifindex;
int idx = 0, rc;
hdr = nlmsg_data(cb->nlh);
// filter by ifindex if requested
ifindex = hdr->ifa_index;
rcu_read_lock();
for (; mcb->h < NETDEV_HASHENTRIES; mcb->h++, mcb->idx = 0) {
idx = 0;
head = &net->dev_index_head[mcb->h];
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx >= mcb->idx &&
(ifindex == 0 || ifindex == dev->ifindex)) {
mdev = __mctp_dev_get(dev);
if (mdev) {
rc = mctp_dump_dev_addrinfo(mdev,
skb, cb);
mctp_dev_put(mdev);
// Error indicates full buffer, this
// callback will get retried.
if (rc < 0)
goto out;
}
}
idx++;
// reset for next iteration
mcb->a_idx = 0;
}
}
out:
rcu_read_unlock();
mcb->idx = idx;
return skb->len;
}
static void mctp_addr_notify(struct mctp_dev *mdev, mctp_eid_t eid, int msg_type,
struct sk_buff *req_skb, struct nlmsghdr *req_nlh)
{
u32 portid = NETLINK_CB(req_skb).portid;
struct net *net = dev_net(mdev->dev);
struct sk_buff *skb;
int rc = -ENOBUFS;
skb = nlmsg_new(mctp_addrinfo_size(), GFP_KERNEL);
if (!skb)
goto out;
rc = mctp_fill_addrinfo(skb, mdev, eid, msg_type,
portid, req_nlh->nlmsg_seq, 0);
if (rc < 0) {
WARN_ON_ONCE(rc == -EMSGSIZE);
goto out;
}
rtnl_notify(skb, net, portid, RTNLGRP_MCTP_IFADDR, req_nlh, GFP_KERNEL);
return;
out:
kfree_skb(skb);
rtnl_set_sk_err(net, RTNLGRP_MCTP_IFADDR, rc);
}
static const struct nla_policy ifa_mctp_policy[IFA_MAX + 1] = {
[IFA_ADDRESS] = { .type = NLA_U8 },
[IFA_LOCAL] = { .type = NLA_U8 },
};
static int mctp_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[IFA_MAX + 1];
struct net_device *dev;
struct mctp_addr *addr;
struct mctp_dev *mdev;
struct ifaddrmsg *ifm;
unsigned long flags;
u8 *tmp_addrs;
int rc;
rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
extack);
if (rc < 0)
return rc;
ifm = nlmsg_data(nlh);
if (tb[IFA_LOCAL])
addr = nla_data(tb[IFA_LOCAL]);
else if (tb[IFA_ADDRESS])
addr = nla_data(tb[IFA_ADDRESS]);
else
return -EINVAL;
/* find device */
dev = __dev_get_by_index(net, ifm->ifa_index);
if (!dev)
return -ENODEV;
mdev = mctp_dev_get_rtnl(dev);
if (!mdev)
return -ENODEV;
if (!mctp_address_unicast(addr->s_addr))
return -EINVAL;
/* Prevent duplicates. Under RTNL so don't need to lock for reading */
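	/* addresses are single-byte EIDs, so memchr() can search the array directly */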
if (memchr(mdev->addrs, addr->s_addr, mdev->num_addrs))
return -EEXIST;
tmp_addrs = kmalloc(mdev->num_addrs + 1, GFP_KERNEL);
if (!tmp_addrs)
return -ENOMEM;
memcpy(tmp_addrs, mdev->addrs, mdev->num_addrs);
tmp_addrs[mdev->num_addrs] = addr->s_addr;
/* Lock to write */
spin_lock_irqsave(&mdev->addrs_lock, flags);
mdev->num_addrs++;
swap(mdev->addrs, tmp_addrs);
spin_unlock_irqrestore(&mdev->addrs_lock, flags);
kfree(tmp_addrs);
mctp_addr_notify(mdev, addr->s_addr, RTM_NEWADDR, skb, nlh);
mctp_route_add_local(mdev, addr->s_addr);
return 0;
}
static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[IFA_MAX + 1];
struct net_device *dev;
struct mctp_addr *addr;
struct mctp_dev *mdev;
struct ifaddrmsg *ifm;
unsigned long flags;
u8 *pos;
int rc;
rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
extack);
if (rc < 0)
return rc;
ifm = nlmsg_data(nlh);
if (tb[IFA_LOCAL])
addr = nla_data(tb[IFA_LOCAL]);
else if (tb[IFA_ADDRESS])
addr = nla_data(tb[IFA_ADDRESS]);
else
return -EINVAL;
/* find device */
dev = __dev_get_by_index(net, ifm->ifa_index);
if (!dev)
return -ENODEV;
mdev = mctp_dev_get_rtnl(dev);
if (!mdev)
return -ENODEV;
pos = memchr(mdev->addrs, addr->s_addr, mdev->num_addrs);
if (!pos)
return -ENOENT;
rc = mctp_route_remove_local(mdev, addr->s_addr);
// we can ignore -ENOENT in the case a route was already removed
if (rc < 0 && rc != -ENOENT)
return rc;
spin_lock_irqsave(&mdev->addrs_lock, flags);
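	/* shift the remaining addresses down over the removed entry */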
memmove(pos, pos + 1, mdev->num_addrs - 1 - (pos - mdev->addrs));
mdev->num_addrs--;
spin_unlock_irqrestore(&mdev->addrs_lock, flags);
mctp_addr_notify(mdev, addr->s_addr, RTM_DELADDR, skb, nlh);
return 0;
}
void mctp_dev_hold(struct mctp_dev *mdev)
{
refcount_inc(&mdev->refs);
}
void mctp_dev_put(struct mctp_dev *mdev)
{
if (mdev && refcount_dec_and_test(&mdev->refs)) {
kfree(mdev->addrs);
dev_put(mdev->dev);
kfree_rcu(mdev, rcu);
}
}
void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key)
__must_hold(&key->lock)
{
if (!dev)
return;
if (dev->ops && dev->ops->release_flow)
dev->ops->release_flow(dev, key);
key->dev = NULL;
mctp_dev_put(dev);
}
void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key)
__must_hold(&key->lock)
{
mctp_dev_hold(dev);
key->dev = dev;
}
static struct mctp_dev *mctp_add_dev(struct net_device *dev)
{
struct mctp_dev *mdev;
ASSERT_RTNL();
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return ERR_PTR(-ENOMEM);
spin_lock_init(&mdev->addrs_lock);
mdev->net = mctp_default_net(dev_net(dev));
/* associate to net_device */
refcount_set(&mdev->refs, 1);
rcu_assign_pointer(dev->mctp_ptr, mdev);
dev_hold(dev);
mdev->dev = dev;
return mdev;
}
static int mctp_fill_link_af(struct sk_buff *skb,
const struct net_device *dev, u32 ext_filter_mask)
{
struct mctp_dev *mdev;
mdev = mctp_dev_get_rtnl(dev);
if (!mdev)
return -ENODATA;
if (nla_put_u32(skb, IFLA_MCTP_NET, mdev->net))
return -EMSGSIZE;
return 0;
}
static size_t mctp_get_link_af_size(const struct net_device *dev,
u32 ext_filter_mask)
{
struct mctp_dev *mdev;
unsigned int ret;
/* caller holds RCU */
mdev = __mctp_dev_get(dev);
if (!mdev)
return 0;
ret = nla_total_size(4); /* IFLA_MCTP_NET */
mctp_dev_put(mdev);
return ret;
}
static const struct nla_policy ifla_af_mctp_policy[IFLA_MCTP_MAX + 1] = {
[IFLA_MCTP_NET] = { .type = NLA_U32 },
};
static int mctp_set_link_af(struct net_device *dev, const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_MCTP_MAX + 1];
struct mctp_dev *mdev;
int rc;
rc = nla_parse_nested(tb, IFLA_MCTP_MAX, attr, ifla_af_mctp_policy,
NULL);
if (rc)
return rc;
mdev = mctp_dev_get_rtnl(dev);
if (!mdev)
return 0;
if (tb[IFLA_MCTP_NET])
WRITE_ONCE(mdev->net, nla_get_u32(tb[IFLA_MCTP_NET]));
return 0;
}
/* Matches netdev types that should have MCTP handling */
static bool mctp_known(struct net_device *dev)
{
/* only register specific types (inc. NONE for TUN devices) */
return dev->type == ARPHRD_MCTP ||
dev->type == ARPHRD_LOOPBACK ||
dev->type == ARPHRD_NONE;
}
static void mctp_unregister(struct net_device *dev)
{
struct mctp_dev *mdev;
mdev = mctp_dev_get_rtnl(dev);
if (!mdev)
return;
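	/* detach from the netdev before tearing down routes and neighbours */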
RCU_INIT_POINTER(mdev->dev->mctp_ptr, NULL);
mctp_route_remove_dev(mdev);
mctp_neigh_remove_dev(mdev);
mctp_dev_put(mdev);
}
static int mctp_register(struct net_device *dev)
{
struct mctp_dev *mdev;
/* Already registered? */
if (rtnl_dereference(dev->mctp_ptr))
return 0;
/* only register specific types */
if (!mctp_known(dev))
return 0;
mdev = mctp_add_dev(dev);
if (IS_ERR(mdev))
return PTR_ERR(mdev);
return 0;
}
static int mctp_dev_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int rc;
switch (event) {
case NETDEV_REGISTER:
rc = mctp_register(dev);
if (rc)
return notifier_from_errno(rc);
break;
case NETDEV_UNREGISTER:
mctp_unregister(dev);
break;
}
return NOTIFY_OK;
}
static int mctp_register_netdevice(struct net_device *dev,
const struct mctp_netdev_ops *ops)
{
struct mctp_dev *mdev;
mdev = mctp_add_dev(dev);
if (IS_ERR(mdev))
return PTR_ERR(mdev);
mdev->ops = ops;
return register_netdevice(dev);
}
int mctp_register_netdev(struct net_device *dev,
const struct mctp_netdev_ops *ops)
{
int rc;
rtnl_lock();
rc = mctp_register_netdevice(dev, ops);
rtnl_unlock();
return rc;
}
EXPORT_SYMBOL_GPL(mctp_register_netdev);
void mctp_unregister_netdev(struct net_device *dev)
{
unregister_netdev(dev);
}
EXPORT_SYMBOL_GPL(mctp_unregister_netdev);
static struct rtnl_af_ops mctp_af_ops = {
.family = AF_MCTP,
.fill_link_af = mctp_fill_link_af,
.get_link_af_size = mctp_get_link_af_size,
.set_link_af = mctp_set_link_af,
};
static struct notifier_block mctp_dev_nb = {
.notifier_call = mctp_dev_notify,
.priority = ADDRCONF_NOTIFY_PRIORITY,
};
void __init mctp_device_init(void)
{
register_netdevice_notifier(&mctp_dev_nb);
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETADDR,
NULL, mctp_dump_addrinfo, 0);
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWADDR,
mctp_rtm_newaddr, NULL, 0);
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELADDR,
mctp_rtm_deladdr, NULL, 0);
rtnl_af_register(&mctp_af_ops);
}
void __exit mctp_device_exit(void)
{
rtnl_af_unregister(&mctp_af_ops);
rtnl_unregister(PF_MCTP, RTM_DELADDR);
rtnl_unregister(PF_MCTP, RTM_NEWADDR);
rtnl_unregister(PF_MCTP, RTM_GETADDR);
unregister_netdevice_notifier(&mctp_dev_nb);
}
| linux-master | net/mctp/device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Management Component Transport Protocol (MCTP) - routing
* implementation.
*
* This is currently based on a simple routing table, with no dst cache. The
* number of routes should stay fairly small, so the lookup cost is small.
*
* Copyright (c) 2021 Code Construct
* Copyright (c) 2021 Google
*/
#include <linux/idr.h>
#include <linux/kconfig.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <uapi/linux/if_arp.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <trace/events/mctp.h>
static const unsigned int mctp_message_maxlen = 64 * 1024;
static const unsigned long mctp_key_lifetime = 6 * CONFIG_HZ;
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev);
/* route output callbacks */
static int mctp_route_discard(struct mctp_route *route, struct sk_buff *skb)
{
kfree_skb(skb);
return 0;
}
static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
{
struct mctp_skb_cb *cb = mctp_cb(skb);
struct mctp_hdr *mh;
struct sock *sk;
u8 type;
WARN_ON(!rcu_read_lock_held());
/* TODO: look up in skb->cb? */
mh = mctp_hdr(skb);
if (!skb_headlen(skb))
return NULL;
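	/* mask out the integrity-check (IC) bit; binds match on the message type only */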
type = (*(u8 *)skb->data) & 0x7f;
sk_for_each_rcu(sk, &net->mctp.binds) {
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
continue;
if (msk->bind_type != type)
continue;
if (!mctp_address_matches(msk->bind_addr, mh->dest))
continue;
return msk;
}
return NULL;
}
static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
mctp_eid_t peer, u8 tag)
{
if (!mctp_address_matches(key->local_addr, local))
return false;
if (key->peer_addr != peer)
return false;
if (key->tag != tag)
return false;
return true;
}
/* returns a key (with key->lock held, and refcounted), or NULL if no such
* key exists.
*/
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
mctp_eid_t peer,
unsigned long *irqflags)
__acquires(&key->lock)
{
struct mctp_sk_key *key, *ret;
unsigned long flags;
struct mctp_hdr *mh;
u8 tag;
mh = mctp_hdr(skb);
tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
ret = NULL;
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry(key, &net->mctp.keys, hlist) {
if (!mctp_key_match(key, mh->dest, peer, tag))
continue;
spin_lock(&key->lock);
if (key->valid) {
refcount_inc(&key->refs);
ret = key;
break;
}
spin_unlock(&key->lock);
}
if (ret) {
spin_unlock(&net->mctp.keys_lock);
*irqflags = flags;
} else {
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
}
return ret;
}
static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
mctp_eid_t local, mctp_eid_t peer,
u8 tag, gfp_t gfp)
{
struct mctp_sk_key *key;
key = kzalloc(sizeof(*key), gfp);
if (!key)
return NULL;
key->peer_addr = peer;
key->local_addr = local;
key->tag = tag;
key->sk = &msk->sk;
key->valid = true;
spin_lock_init(&key->lock);
refcount_set(&key->refs, 1);
sock_hold(key->sk);
return key;
}
void mctp_key_unref(struct mctp_sk_key *key)
{
unsigned long flags;
if (!refcount_dec_and_test(&key->refs))
return;
/* even though no refs exist here, the lock allows us to stay
* consistent with the locking requirement of mctp_dev_release_key
*/
spin_lock_irqsave(&key->lock, flags);
mctp_dev_release_key(key->dev, key);
spin_unlock_irqrestore(&key->lock, flags);
sock_put(key->sk);
kfree(key);
}
static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
{
struct net *net = sock_net(&msk->sk);
struct mctp_sk_key *tmp;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&net->mctp.keys_lock, flags);
if (sock_flag(&msk->sk, SOCK_DEAD)) {
rc = -EINVAL;
goto out_unlock;
}
hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
key->tag)) {
spin_lock(&tmp->lock);
if (tmp->valid)
rc = -EEXIST;
spin_unlock(&tmp->lock);
if (rc)
break;
}
}
if (!rc) {
refcount_inc(&key->refs);
key->expiry = jiffies + mctp_key_lifetime;
timer_reduce(&msk->key_expiry, key->expiry);
hlist_add_head(&key->hlist, &net->mctp.keys);
hlist_add_head(&key->sklist, &msk->keys);
}
out_unlock:
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
return rc;
}
/* Helper for mctp_route_input().
* We're done with the key; unlock and unref the key.
* For the usual case of automatic expiry we remove the key from lists.
* In the case that manual allocation is set on a key we release the lock
* and local ref, reset reassembly, but don't remove from lists.
*/
static void __mctp_key_done_in(struct mctp_sk_key *key, struct net *net,
unsigned long flags, unsigned long reason)
__releases(&key->lock)
{
struct sk_buff *skb;
trace_mctp_key_release(key, reason);
skb = key->reasm_head;
key->reasm_head = NULL;
if (!key->manual_alloc) {
key->reasm_dead = true;
key->valid = false;
mctp_dev_release_key(key->dev, key);
}
spin_unlock_irqrestore(&key->lock, flags);
if (!key->manual_alloc) {
spin_lock_irqsave(&net->mctp.keys_lock, flags);
if (!hlist_unhashed(&key->hlist)) {
hlist_del_init(&key->hlist);
hlist_del_init(&key->sklist);
mctp_key_unref(key);
}
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
}
/* and one for the local reference */
mctp_key_unref(key);
kfree_skb(skb);
}
#ifdef CONFIG_MCTP_FLOWS
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key)
{
struct mctp_flow *flow;
flow = skb_ext_add(skb, SKB_EXT_MCTP);
if (!flow)
return;
refcount_inc(&key->refs);
flow->key = key;
}
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
{
struct mctp_sk_key *key;
struct mctp_flow *flow;
flow = skb_ext_find(skb, SKB_EXT_MCTP);
if (!flow)
return;
key = flow->key;
if (WARN_ON(key->dev && key->dev != dev))
return;
mctp_dev_set_key(dev, key);
}
#else
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {}
#endif
static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
{
struct mctp_hdr *hdr = mctp_hdr(skb);
u8 exp_seq, this_seq;
this_seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT)
& MCTP_HDR_SEQ_MASK;
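	/* first fragment: start a new reassembly chain on this key */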
if (!key->reasm_head) {
key->reasm_head = skb;
key->reasm_tailp = &(skb_shinfo(skb)->frag_list);
key->last_seq = this_seq;
return 0;
}
exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;
if (this_seq != exp_seq)
return -EINVAL;
if (key->reasm_head->len + skb->len > mctp_message_maxlen)
return -EINVAL;
skb->next = NULL;
skb->sk = NULL;
*key->reasm_tailp = skb;
key->reasm_tailp = &skb->next;
key->last_seq = this_seq;
key->reasm_head->data_len += skb->len;
key->reasm_head->len += skb->len;
key->reasm_head->truesize += skb->truesize;
return 0;
}
static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
{
struct mctp_sk_key *key, *any_key = NULL;
struct net *net = dev_net(skb->dev);
struct mctp_sock *msk;
struct mctp_hdr *mh;
unsigned long f;
u8 tag, flags;
int rc;
msk = NULL;
rc = -EINVAL;
/* we may be receiving a locally-routed packet; drop source sk
* accounting
*/
skb_orphan(skb);
/* ensure we have enough data for a header and a type */
if (skb->len < sizeof(struct mctp_hdr) + 1)
goto out;
/* grab header, advance data ptr */
mh = mctp_hdr(skb);
skb_pull(skb, sizeof(struct mctp_hdr));
if (mh->ver != 1)
goto out;
flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
rcu_read_lock();
/* lookup socket / reasm context, exactly matching (src,dest,tag).
* we hold a ref on the key, and key->lock held.
*/
key = mctp_lookup_key(net, skb, mh->src, &f);
if (flags & MCTP_HDR_FLAG_SOM) {
if (key) {
msk = container_of(key->sk, struct mctp_sock, sk);
} else {
/* first response to a broadcast? do a more general
* key lookup to find the socket, but don't use this
* key for reassembly - we'll create a more specific
* one for future packets if required (ie, !EOM).
*/
any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
if (any_key) {
msk = container_of(any_key->sk,
struct mctp_sock, sk);
spin_unlock_irqrestore(&any_key->lock, f);
}
}
if (!key && !msk && (tag & MCTP_HDR_FLAG_TO))
msk = mctp_lookup_bind(net, skb);
if (!msk) {
rc = -ENOENT;
goto out_unlock;
}
/* single-packet message? deliver to socket, clean up any
* pending key.
*/
if (flags & MCTP_HDR_FLAG_EOM) {
sock_queue_rcv_skb(&msk->sk, skb);
if (key) {
/* we've hit a pending reassembly; not much we
* can do but drop it
*/
__mctp_key_done_in(key, net, f,
MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
rc = 0;
goto out_unlock;
}
/* broadcast response or a bind() - create a key for further
* packets for this message
*/
if (!key) {
key = mctp_key_alloc(msk, mh->dest, mh->src,
tag, GFP_ATOMIC);
if (!key) {
rc = -ENOMEM;
goto out_unlock;
}
/* we can queue without the key lock here, as the
* key isn't observable yet
*/
mctp_frag_queue(key, skb);
/* if the key_add fails, we've raced with another
* SOM packet with the same src, dest and tag. There's
* no way to distinguish future packets, so all we
* can do is drop; we'll free the skb on exit from
* this function.
*/
rc = mctp_key_add(key, msk);
if (!rc)
trace_mctp_key_acquire(key);
/* we don't need to release key->lock on exit, so
* clean up here and suppress the unlock via
* setting to NULL
*/
mctp_key_unref(key);
key = NULL;
} else {
if (key->reasm_head || key->reasm_dead) {
/* duplicate start? drop everything */
__mctp_key_done_in(key, net, f,
MCTP_TRACE_KEY_INVALIDATED);
rc = -EEXIST;
key = NULL;
} else {
rc = mctp_frag_queue(key, skb);
}
}
} else if (key) {
/* this packet continues a previous message; reassemble
* using the message-specific key
*/
/* we need to be continuing an existing reassembly... */
if (!key->reasm_head)
rc = -EINVAL;
else
rc = mctp_frag_queue(key, skb);
/* end of message? deliver to socket, and we're done with
* the reassembly/response key
*/
if (!rc && flags & MCTP_HDR_FLAG_EOM) {
sock_queue_rcv_skb(key->sk, key->reasm_head);
key->reasm_head = NULL;
__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
} else {
/* not a start, no matching key */
rc = -ENOENT;
}
out_unlock:
rcu_read_unlock();
if (key) {
spin_unlock_irqrestore(&key->lock, f);
mctp_key_unref(key);
}
if (any_key)
mctp_key_unref(any_key);
out:
if (rc)
kfree_skb(skb);
return rc;
}
static unsigned int mctp_route_mtu(struct mctp_route *rt)
{
return rt->mtu ?: READ_ONCE(rt->dev->dev->mtu);
}
static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
{
struct mctp_skb_cb *cb = mctp_cb(skb);
struct mctp_hdr *hdr = mctp_hdr(skb);
char daddr_buf[MAX_ADDR_LEN];
char *daddr = NULL;
unsigned int mtu;
int rc;
skb->protocol = htons(ETH_P_MCTP);
mtu = READ_ONCE(skb->dev->mtu);
if (skb->len > mtu) {
kfree_skb(skb);
return -EMSGSIZE;
}
if (cb->ifindex) {
/* direct route; use the hwaddr we stashed in sendmsg */
if (cb->halen != skb->dev->addr_len) {
/* sanity check, sendmsg should have already caught this */
kfree_skb(skb);
return -EMSGSIZE;
}
daddr = cb->haddr;
} else {
/* If lookup fails let the device handle daddr==NULL */
if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0)
daddr = daddr_buf;
}
rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
daddr, skb->dev->dev_addr, skb->len);
if (rc < 0) {
kfree_skb(skb);
return -EHOSTUNREACH;
}
mctp_flow_prepare_output(skb, route->dev);
rc = dev_queue_xmit(skb);
if (rc)
rc = net_xmit_errno(rc);
return rc;
}
/* route alloc/release */
static void mctp_route_release(struct mctp_route *rt)
{
if (refcount_dec_and_test(&rt->refs)) {
mctp_dev_put(rt->dev);
kfree_rcu(rt, rcu);
}
}
/* returns a route with the refcount at 1 */
static struct mctp_route *mctp_route_alloc(void)
{
struct mctp_route *rt;
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt)
return NULL;
INIT_LIST_HEAD(&rt->list);
refcount_set(&rt->refs, 1);
rt->output = mctp_route_discard;
return rt;
}
unsigned int mctp_default_net(struct net *net)
{
return READ_ONCE(net->mctp.default_net);
}
int mctp_default_net_set(struct net *net, unsigned int index)
{
if (index == 0)
return -EINVAL;
WRITE_ONCE(net->mctp.default_net, index);
return 0;
}
/* tag management */
static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
struct mctp_sock *msk)
{
struct netns_mctp *mns = &net->mctp;
lockdep_assert_held(&mns->keys_lock);
key->expiry = jiffies + mctp_key_lifetime;
timer_reduce(&msk->key_expiry, key->expiry);
	/* we hold the net's keys_lock here, allowing updates to both
	 * the net and sk lists
	 */
hlist_add_head_rcu(&key->hlist, &mns->keys);
hlist_add_head_rcu(&key->sklist, &msk->keys);
refcount_inc(&key->refs);
}
/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
* it for the socket msk
*/
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
mctp_eid_t daddr, mctp_eid_t saddr,
bool manual, u8 *tagp)
{
struct net *net = sock_net(&msk->sk);
struct netns_mctp *mns = &net->mctp;
struct mctp_sk_key *key, *tmp;
unsigned long flags;
u8 tagbits;
/* for NULL destination EIDs, we may get a response from any peer */
if (daddr == MCTP_ADDR_NULL)
daddr = MCTP_ADDR_ANY;
/* be optimistic, alloc now */
key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
if (!key)
return ERR_PTR(-ENOMEM);
/* 8 possible tag values */
tagbits = 0xff;
spin_lock_irqsave(&mns->keys_lock, flags);
/* Walk through the existing keys, looking for potential conflicting
* tags. If we find a conflict, clear that bit from tagbits
*/
hlist_for_each_entry(tmp, &mns->keys, hlist) {
/* We can check the lookup fields (*_addr, tag) without the
* lock held, they don't change over the lifetime of the key.
*/
/* if we don't own the tag, it can't conflict */
if (tmp->tag & MCTP_HDR_FLAG_TO)
continue;
if (!(mctp_address_matches(tmp->peer_addr, daddr) &&
mctp_address_matches(tmp->local_addr, saddr)))
continue;
spin_lock(&tmp->lock);
/* key must still be valid. If we find a match, clear the
* potential tag value
*/
if (tmp->valid)
tagbits &= ~(1 << tmp->tag);
spin_unlock(&tmp->lock);
if (!tagbits)
break;
}
if (tagbits) {
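		/* use the lowest-numbered free tag value */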
key->tag = __ffs(tagbits);
mctp_reserve_tag(net, key, msk);
trace_mctp_key_acquire(key);
key->manual_alloc = manual;
*tagp = key->tag;
}
spin_unlock_irqrestore(&mns->keys_lock, flags);
if (!tagbits) {
kfree(key);
return ERR_PTR(-EBUSY);
}
return key;
}
static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
mctp_eid_t daddr,
u8 req_tag, u8 *tagp)
{
struct net *net = sock_net(&msk->sk);
struct netns_mctp *mns = &net->mctp;
struct mctp_sk_key *key, *tmp;
unsigned long flags;
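	/* strip the control flags; we match on the tag value alone */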
req_tag &= ~(MCTP_TAG_PREALLOC | MCTP_TAG_OWNER);
key = NULL;
spin_lock_irqsave(&mns->keys_lock, flags);
hlist_for_each_entry(tmp, &mns->keys, hlist) {
if (tmp->tag != req_tag)
continue;
if (!mctp_address_matches(tmp->peer_addr, daddr))
continue;
if (!tmp->manual_alloc)
continue;
spin_lock(&tmp->lock);
if (tmp->valid) {
key = tmp;
refcount_inc(&key->refs);
spin_unlock(&tmp->lock);
break;
}
spin_unlock(&tmp->lock);
}
spin_unlock_irqrestore(&mns->keys_lock, flags);
if (!key)
return ERR_PTR(-ENOENT);
if (tagp)
*tagp = key->tag;
return key;
}
/* routing lookups */
static bool mctp_rt_match_eid(struct mctp_route *rt,
unsigned int net, mctp_eid_t eid)
{
return READ_ONCE(rt->dev->net) == net &&
rt->min <= eid && rt->max >= eid;
}
/* compares match, used for duplicate prevention */
static bool mctp_rt_compare_exact(struct mctp_route *rt1,
struct mctp_route *rt2)
{
ASSERT_RTNL();
return rt1->dev->net == rt2->dev->net &&
rt1->min == rt2->min &&
rt1->max == rt2->max;
}
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
mctp_eid_t daddr)
{
struct mctp_route *tmp, *rt = NULL;
list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
/* TODO: add metrics */
if (mctp_rt_match_eid(tmp, dnet, daddr)) {
if (refcount_inc_not_zero(&tmp->refs)) {
rt = tmp;
break;
}
}
}
return rt;
}
static struct mctp_route *mctp_route_lookup_null(struct net *net,
struct net_device *dev)
{
struct mctp_route *rt;
list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
refcount_inc_not_zero(&rt->refs))
return rt;
}
return NULL;
}
static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
unsigned int mtu, u8 tag)
{
const unsigned int hlen = sizeof(struct mctp_hdr);
struct mctp_hdr *hdr, *hdr2;
unsigned int pos, size, headroom;
struct sk_buff *skb2;
int rc;
u8 seq;
hdr = mctp_hdr(skb);
seq = 0;
rc = 0;
if (mtu < hlen + 1) {
kfree_skb(skb);
return -EMSGSIZE;
}
/* keep same headroom as the original skb */
headroom = skb_headroom(skb);
/* we've got the header */
skb_pull(skb, hlen);
for (pos = 0; pos < skb->len;) {
/* size of message payload */
size = min(mtu - hlen, skb->len - pos);
skb2 = alloc_skb(headroom + hlen + size, GFP_KERNEL);
if (!skb2) {
rc = -ENOMEM;
break;
}
/* generic skb copy */
skb2->protocol = skb->protocol;
skb2->priority = skb->priority;
skb2->dev = skb->dev;
memcpy(skb2->cb, skb->cb, sizeof(skb2->cb));
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
/* establish packet */
skb_reserve(skb2, headroom);
skb_reset_network_header(skb2);
skb_put(skb2, hlen + size);
skb2->transport_header = skb2->network_header + hlen;
/* copy header fields, calculate SOM/EOM flags & seq */
hdr2 = mctp_hdr(skb2);
hdr2->ver = hdr->ver;
hdr2->dest = hdr->dest;
hdr2->src = hdr->src;
hdr2->flags_seq_tag = tag &
(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
if (pos == 0)
hdr2->flags_seq_tag |= MCTP_HDR_FLAG_SOM;
if (pos + size == skb->len)
hdr2->flags_seq_tag |= MCTP_HDR_FLAG_EOM;
hdr2->flags_seq_tag |= seq << MCTP_HDR_SEQ_SHIFT;
/* copy message payload */
skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
/* do route */
rc = rt->output(rt, skb2);
if (rc)
break;
seq = (seq + 1) & MCTP_HDR_SEQ_MASK;
pos += size;
}
consume_skb(skb);
return rc;
}
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag)
{
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
struct mctp_skb_cb *cb = mctp_cb(skb);
struct mctp_route tmp_rt = {0};
struct mctp_sk_key *key;
struct mctp_hdr *hdr;
unsigned long flags;
unsigned int mtu;
mctp_eid_t saddr;
bool ext_rt;
int rc;
u8 tag;
rc = -ENODEV;
if (rt) {
ext_rt = false;
if (WARN_ON(!rt->dev))
goto out_release;
} else if (cb->ifindex) {
struct net_device *dev;
ext_rt = true;
rt = &tmp_rt;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex);
if (!dev) {
rcu_read_unlock();
return rc;
}
rt->dev = __mctp_dev_get(dev);
rcu_read_unlock();
if (!rt->dev)
goto out_release;
/* establish temporary route - we set up enough to keep
* mctp_route_output happy
*/
rt->output = mctp_route_output;
rt->mtu = 0;
} else {
return -EINVAL;
}
spin_lock_irqsave(&rt->dev->addrs_lock, flags);
if (rt->dev->num_addrs == 0) {
rc = -EHOSTUNREACH;
} else {
/* use the outbound interface's first address as our source */
saddr = rt->dev->addrs[0];
rc = 0;
}
spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);
if (rc)
goto out_release;
if (req_tag & MCTP_TAG_OWNER) {
if (req_tag & MCTP_TAG_PREALLOC)
key = mctp_lookup_prealloc_tag(msk, daddr,
req_tag, &tag);
else
key = mctp_alloc_local_tag(msk, daddr, saddr,
false, &tag);
if (IS_ERR(key)) {
rc = PTR_ERR(key);
goto out_release;
}
mctp_skb_set_flow(skb, key);
/* done with the key in this scope */
mctp_key_unref(key);
tag |= MCTP_HDR_FLAG_TO;
} else {
key = NULL;
tag = req_tag & MCTP_TAG_MASK;
}
skb->protocol = htons(ETH_P_MCTP);
skb->priority = 0;
skb_reset_transport_header(skb);
skb_push(skb, sizeof(struct mctp_hdr));
skb_reset_network_header(skb);
skb->dev = rt->dev->dev;
/* cb->net will have been set on initial ingress */
cb->src = saddr;
/* set up common header fields */
hdr = mctp_hdr(skb);
hdr->ver = 1;
hdr->dest = daddr;
hdr->src = saddr;
mtu = mctp_route_mtu(rt);
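	/* single-packet message if it fits within the route MTU; otherwise fragment */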
if (skb->len + sizeof(struct mctp_hdr) <= mtu) {
hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM |
MCTP_HDR_FLAG_EOM | tag;
rc = rt->output(rt, skb);
} else {
rc = mctp_do_fragment_route(rt, skb, mtu, tag);
}
out_release:
if (!ext_rt)
mctp_route_release(rt);
mctp_dev_put(tmp_rt.dev);
return rc;
}
/* route management */
static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
unsigned int daddr_extent, unsigned int mtu,
unsigned char type)
{
int (*rtfn)(struct mctp_route *rt, struct sk_buff *skb);
struct net *net = dev_net(mdev->dev);
struct mctp_route *rt, *ert;
if (!mctp_address_unicast(daddr_start))
return -EINVAL;
if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
return -EINVAL;
switch (type) {
case RTN_LOCAL:
rtfn = mctp_route_input;
break;
case RTN_UNICAST:
rtfn = mctp_route_output;
break;
default:
return -EINVAL;
}
rt = mctp_route_alloc();
if (!rt)
return -ENOMEM;
rt->min = daddr_start;
rt->max = daddr_start + daddr_extent;
rt->mtu = mtu;
rt->dev = mdev;
mctp_dev_hold(rt->dev);
rt->type = type;
rt->output = rtfn;
ASSERT_RTNL();
/* Prevent duplicate identical routes. */
list_for_each_entry(ert, &net->mctp.routes, list) {
if (mctp_rt_compare_exact(rt, ert)) {
mctp_route_release(rt);
return -EEXIST;
}
}
list_add_rcu(&rt->list, &net->mctp.routes);
return 0;
}
static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
unsigned int daddr_extent, unsigned char type)
{
struct net *net = dev_net(mdev->dev);
struct mctp_route *rt, *tmp;
mctp_eid_t daddr_end;
bool dropped;
if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
return -EINVAL;
daddr_end = daddr_start + daddr_extent;
dropped = false;
ASSERT_RTNL();
list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
if (rt->dev == mdev &&
rt->min == daddr_start && rt->max == daddr_end &&
rt->type == type) {
list_del_rcu(&rt->list);
/* TODO: immediate RTM_DELROUTE */
mctp_route_release(rt);
dropped = true;
}
}
return dropped ? 0 : -ENOENT;
}
int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
return mctp_route_add(mdev, addr, 0, 0, RTN_LOCAL);
}
int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
}
/* removes all entries for a given device */
void mctp_route_remove_dev(struct mctp_dev *mdev)
{
struct net *net = dev_net(mdev->dev);
struct mctp_route *rt, *tmp;
ASSERT_RTNL();
list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
if (rt->dev == mdev) {
list_del_rcu(&rt->list);
/* TODO: immediate RTM_DELROUTE */
mctp_route_release(rt);
}
}
}
/* Incoming packet-handling */
static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt,
struct net_device *orig_dev)
{
struct net *net = dev_net(dev);
struct mctp_dev *mdev;
struct mctp_skb_cb *cb;
struct mctp_route *rt;
struct mctp_hdr *mh;
rcu_read_lock();
mdev = __mctp_dev_get(dev);
rcu_read_unlock();
if (!mdev) {
/* basic non-data sanity checks */
goto err_drop;
}
if (!pskb_may_pull(skb, sizeof(struct mctp_hdr)))
goto err_drop;
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
/* We have enough for a header; decode and route */
mh = mctp_hdr(skb);
if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
goto err_drop;
/* source must be valid unicast or null; drop reserved ranges and
* broadcast
*/
if (!(mctp_address_unicast(mh->src) || mctp_address_null(mh->src)))
goto err_drop;
/* dest address: as above, but allow broadcast */
if (!(mctp_address_unicast(mh->dest) || mctp_address_null(mh->dest) ||
mctp_address_broadcast(mh->dest)))
goto err_drop;
/* MCTP drivers must populate halen/haddr */
if (dev->type == ARPHRD_MCTP) {
cb = mctp_cb(skb);
} else {
cb = __mctp_cb(skb);
cb->halen = 0;
}
cb->net = READ_ONCE(mdev->net);
cb->ifindex = dev->ifindex;
rt = mctp_route_lookup(net, cb->net, mh->dest);
/* NULL EID, but addressed to our physical address */
if (!rt && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST)
rt = mctp_route_lookup_null(net, dev);
if (!rt)
goto err_drop;
rt->output(rt, skb);
mctp_route_release(rt);
mctp_dev_put(mdev);
return NET_RX_SUCCESS;
err_drop:
kfree_skb(skb);
mctp_dev_put(mdev);
return NET_RX_DROP;
}
static struct packet_type mctp_packet_type = {
.type = cpu_to_be16(ETH_P_MCTP),
.func = mctp_pkttype_receive,
};
/* netlink interface */
static const struct nla_policy rta_mctp_policy[RTA_MAX + 1] = {
[RTA_DST] = { .type = NLA_U8 },
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_OIF] = { .type = NLA_U32 },
};
/* Common part for RTM_NEWROUTE and RTM_DELROUTE parsing.
* tb must hold RTA_MAX+1 elements.
*/
static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
struct nlattr **tb, struct rtmsg **rtm,
struct mctp_dev **mdev, mctp_eid_t *daddr_start)
{
struct net *net = sock_net(skb->sk);
struct net_device *dev;
unsigned int ifindex;
int rc;
rc = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
rta_mctp_policy, extack);
if (rc < 0) {
NL_SET_ERR_MSG(extack, "incorrect format");
return rc;
}
if (!tb[RTA_DST]) {
NL_SET_ERR_MSG(extack, "dst EID missing");
return -EINVAL;
}
*daddr_start = nla_get_u8(tb[RTA_DST]);
if (!tb[RTA_OIF]) {
NL_SET_ERR_MSG(extack, "ifindex missing");
return -EINVAL;
}
ifindex = nla_get_u32(tb[RTA_OIF]);
*rtm = nlmsg_data(nlh);
if ((*rtm)->rtm_family != AF_MCTP) {
NL_SET_ERR_MSG(extack, "route family must be AF_MCTP");
return -EINVAL;
}
dev = __dev_get_by_index(net, ifindex);
if (!dev) {
NL_SET_ERR_MSG(extack, "bad ifindex");
return -ENODEV;
}
*mdev = mctp_dev_get_rtnl(dev);
if (!*mdev)
return -ENODEV;
if (dev->flags & IFF_LOOPBACK) {
NL_SET_ERR_MSG(extack, "no routes to loopback");
return -EINVAL;
}
return 0;
}
static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = {
[RTAX_MTU] = { .type = NLA_U32 },
};
static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[RTA_MAX + 1];
struct nlattr *tbx[RTAX_MAX + 1];
mctp_eid_t daddr_start;
struct mctp_dev *mdev;
struct rtmsg *rtm;
unsigned int mtu;
int rc;
rc = mctp_route_nlparse(skb, nlh, extack, tb,
&rtm, &mdev, &daddr_start);
if (rc < 0)
return rc;
if (rtm->rtm_type != RTN_UNICAST) {
NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
return -EINVAL;
}
mtu = 0;
if (tb[RTA_METRICS]) {
rc = nla_parse_nested(tbx, RTAX_MAX, tb[RTA_METRICS],
rta_metrics_policy, NULL);
if (rc < 0)
return rc;
if (tbx[RTAX_MTU])
mtu = nla_get_u32(tbx[RTAX_MTU]);
}
rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu,
rtm->rtm_type);
return rc;
}
static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[RTA_MAX + 1];
mctp_eid_t daddr_start;
struct mctp_dev *mdev;
struct rtmsg *rtm;
int rc;
rc = mctp_route_nlparse(skb, nlh, extack, tb,
&rtm, &mdev, &daddr_start);
if (rc < 0)
return rc;
/* we only have unicast routes */
if (rtm->rtm_type != RTN_UNICAST)
return -EINVAL;
rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
return rc;
}
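/* Fill a netlink message describing one route: destination EID, EID
* range length, optional MTU metric and the outgoing interface.
*/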
static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
u32 portid, u32 seq, int event, unsigned int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *hdr;
void *metrics;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
if (!nlh)
return -EMSGSIZE;
hdr = nlmsg_data(nlh);
hdr->rtm_family = AF_MCTP;
/* we use the _len fields as a number of EIDs, rather than
* a number of bits in the address
*/
hdr->rtm_dst_len = rt->max - rt->min;
hdr->rtm_src_len = 0;
hdr->rtm_tos = 0;
hdr->rtm_table = RT_TABLE_DEFAULT;
hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */
hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */
hdr->rtm_type = rt->type;
if (nla_put_u8(skb, RTA_DST, rt->min))
goto cancel;
metrics = nla_nest_start_noflag(skb, RTA_METRICS);
if (!metrics)
goto cancel;
if (rt->mtu) {
if (nla_put_u32(skb, RTAX_MTU, rt->mtu))
goto cancel;
}
nla_nest_end(skb, metrics);
if (rt->dev) {
if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex))
goto cancel;
}
/* TODO: conditional neighbour physaddr? */
nlmsg_end(skb, nlh);
return 0;
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
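/* Dump all routes in the namespace as RTM_NEWROUTE messages, resuming
* from the index stashed in cb->args[0] by the previous pass.
*/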
static int mctp_dump_rtinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct mctp_route *rt;
int s_idx, idx;
/* TODO: allow filtering on route data, possibly under
* cb->strict_check
*/
/* TODO: change to struct overlay */
s_idx = cb->args[0];
idx = 0;
rcu_read_lock();
list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
if (idx++ < s_idx)
continue;
if (mctp_fill_rtinfo(skb, rt,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWROUTE, NLM_F_MULTI) < 0)
break;
}
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
/* net namespace implementation */
static int __net_init mctp_routes_net_init(struct net *net)
{
struct netns_mctp *ns = &net->mctp;
INIT_LIST_HEAD(&ns->routes);
INIT_HLIST_HEAD(&ns->binds);
mutex_init(&ns->bind_lock);
INIT_HLIST_HEAD(&ns->keys);
spin_lock_init(&ns->keys_lock);
WARN_ON(mctp_default_net_set(net, MCTP_INITIAL_DEFAULT_NET));
return 0;
}
static void __net_exit mctp_routes_net_exit(struct net *net)
{
struct mctp_route *rt;
rcu_read_lock();
list_for_each_entry_rcu(rt, &net->mctp.routes, list)
mctp_route_release(rt);
rcu_read_unlock();
}
static struct pernet_operations mctp_net_ops = {
.init = mctp_routes_net_init,
.exit = mctp_routes_net_exit,
};
int __init mctp_routes_init(void)
{
dev_add_pack(&mctp_packet_type);
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETROUTE,
NULL, mctp_dump_rtinfo, 0);
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWROUTE,
mctp_newroute, NULL, 0);
rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELROUTE,
mctp_delroute, NULL, 0);
return register_pernet_subsys(&mctp_net_ops);
}
void mctp_routes_exit(void)
{
unregister_pernet_subsys(&mctp_net_ops);
rtnl_unregister(PF_MCTP, RTM_DELROUTE);
rtnl_unregister(PF_MCTP, RTM_NEWROUTE);
rtnl_unregister(PF_MCTP, RTM_GETROUTE);
dev_remove_pack(&mctp_packet_type);
}
#if IS_ENABLED(CONFIG_MCTP_TEST)
#include "test/route-test.c"
#endif
| linux-master | net/mctp/route.c |
// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include "utils.h"
struct mctp_test_route {
struct mctp_route rt;
struct sk_buff_head pkts;
};
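/* Test route output op: instead of transmitting, queue the skb on the
* test route's pkts list so test cases can inspect what would be sent.
*/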
static int mctp_test_route_output(struct mctp_route *rt, struct sk_buff *skb)
{
struct mctp_test_route *test_rt = container_of(rt, struct mctp_test_route, rt);
skb_queue_tail(&test_rt->pkts, skb);
return 0;
}
/* local version of mctp_route_alloc() */
static struct mctp_test_route *mctp_route_test_alloc(void)
{
struct mctp_test_route *rt;
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt)
return NULL;
INIT_LIST_HEAD(&rt->rt.list);
refcount_set(&rt->rt.refs, 1);
rt->rt.output = mctp_test_route_output;
skb_queue_head_init(&rt->pkts);
return rt;
}
static struct mctp_test_route *mctp_test_create_route(struct net *net,
struct mctp_dev *dev,
mctp_eid_t eid,
unsigned int mtu)
{
struct mctp_test_route *rt;
rt = mctp_route_test_alloc();
if (!rt)
return NULL;
rt->rt.min = eid;
rt->rt.max = eid;
rt->rt.mtu = mtu;
rt->rt.type = RTN_UNSPEC;
if (dev)
mctp_dev_hold(dev);
rt->rt.dev = dev;
list_add_rcu(&rt->rt.list, &net->mctp.routes);
return rt;
}
static void mctp_test_route_destroy(struct kunit *test,
struct mctp_test_route *rt)
{
unsigned int refs;
rtnl_lock();
list_del_rcu(&rt->rt.list);
rtnl_unlock();
skb_queue_purge(&rt->pkts);
if (rt->rt.dev)
mctp_dev_put(rt->rt.dev);
refs = refcount_read(&rt->rt.refs);
KUNIT_ASSERT_EQ_MSG(test, refs, 1, "route ref imbalance");
kfree_rcu(&rt->rt, rcu);
}
static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
unsigned int data_len)
{
size_t hdr_len = sizeof(*hdr);
struct sk_buff *skb;
unsigned int i;
u8 *buf;
skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
if (!skb)
return NULL;
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
buf = skb_put(skb, data_len);
for (i = 0; i < data_len; i++)
buf[i] = i & 0xff;
return skb;
}
static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
const void *data,
size_t data_len)
{
size_t hdr_len = sizeof(*hdr);
struct sk_buff *skb;
skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
if (!skb)
return NULL;
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
memcpy(skb_put(skb, data_len), data, data_len);
return skb;
}
#define mctp_test_create_skb_data(h, d) \
__mctp_test_create_skb_data(h, d, sizeof(*d))
struct mctp_frag_test {
unsigned int mtu;
unsigned int msgsize;
unsigned int n_frags;
};
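/* Fragment a msgsize-byte message over a route with the given MTU, then
* check the fragment count and the header fields, SOM/EOM flags and
* sequence numbers of each resulting packet.
*/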
static void mctp_test_fragment(struct kunit *test)
{
const struct mctp_frag_test *params;
int rc, i, n, mtu, msgsize;
struct mctp_test_route *rt;
struct sk_buff *skb;
struct mctp_hdr hdr;
u8 seq;
params = test->param_value;
mtu = params->mtu;
msgsize = params->msgsize;
hdr.ver = 1;
hdr.src = 8;
hdr.dest = 10;
hdr.flags_seq_tag = MCTP_HDR_FLAG_TO;
skb = mctp_test_create_skb(&hdr, msgsize);
KUNIT_ASSERT_TRUE(test, skb);
rt = mctp_test_create_route(&init_net, NULL, 10, mtu);
KUNIT_ASSERT_TRUE(test, rt);
rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER);
KUNIT_EXPECT_FALSE(test, rc);
n = rt->pkts.qlen;
KUNIT_EXPECT_EQ(test, n, params->n_frags);
for (i = 0;; i++) {
struct mctp_hdr *hdr2;
struct sk_buff *skb2;
u8 tag_mask, seq2;
bool first, last;
first = i == 0;
last = i == (n - 1);
skb2 = skb_dequeue(&rt->pkts);
if (!skb2)
break;
hdr2 = mctp_hdr(skb2);
tag_mask = MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO;
KUNIT_EXPECT_EQ(test, hdr2->ver, hdr.ver);
KUNIT_EXPECT_EQ(test, hdr2->src, hdr.src);
KUNIT_EXPECT_EQ(test, hdr2->dest, hdr.dest);
KUNIT_EXPECT_EQ(test, hdr2->flags_seq_tag & tag_mask,
hdr.flags_seq_tag & tag_mask);
KUNIT_EXPECT_EQ(test,
!!(hdr2->flags_seq_tag & MCTP_HDR_FLAG_SOM), first);
KUNIT_EXPECT_EQ(test,
!!(hdr2->flags_seq_tag & MCTP_HDR_FLAG_EOM), last);
seq2 = (hdr2->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) &
MCTP_HDR_SEQ_MASK;
if (first) {
seq = seq2;
} else {
seq++;
KUNIT_EXPECT_EQ(test, seq2, seq & MCTP_HDR_SEQ_MASK);
}
if (!last)
KUNIT_EXPECT_EQ(test, skb2->len, mtu);
else
KUNIT_EXPECT_LE(test, skb2->len, mtu);
kfree_skb(skb2);
}
mctp_test_route_destroy(test, rt);
}
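/* The expected counts below are consistent with a 4-byte MCTP header:
* an MTU of 68 leaves 64 bytes of payload per fragment, so
* n_frags == DIV_ROUND_UP(msgsize, 64).
*/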
static const struct mctp_frag_test mctp_frag_tests[] = {
{.mtu = 68, .msgsize = 63, .n_frags = 1},
{.mtu = 68, .msgsize = 64, .n_frags = 1},
{.mtu = 68, .msgsize = 65, .n_frags = 2},
{.mtu = 68, .msgsize = 66, .n_frags = 2},
{.mtu = 68, .msgsize = 127, .n_frags = 2},
{.mtu = 68, .msgsize = 128, .n_frags = 2},
{.mtu = 68, .msgsize = 129, .n_frags = 3},
{.mtu = 68, .msgsize = 130, .n_frags = 3},
};
static void mctp_frag_test_to_desc(const struct mctp_frag_test *t, char *desc)
{
sprintf(desc, "mtu %d len %d -> %d frags",
t->msgsize, t->mtu, t->n_frags);
}
KUNIT_ARRAY_PARAM(mctp_frag, mctp_frag_tests, mctp_frag_test_to_desc);
struct mctp_rx_input_test {
struct mctp_hdr hdr;
bool input;
};
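/* Inject a single packet via mctp_pkttype_receive() and check whether
* the test route's output op saw it, matching the expected 'input' flag.
*/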
static void mctp_test_rx_input(struct kunit *test)
{
const struct mctp_rx_input_test *params;
struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct sk_buff *skb;
params = test->param_value;
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
skb = mctp_test_create_skb(¶ms->hdr, 1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
__mctp_cb(skb);
mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);
KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input);
mctp_test_route_destroy(test, rt);
mctp_test_destroy_dev(dev);
}
#define RX_HDR(_ver, _src, _dest, _fst) \
{ .ver = _ver, .src = _src, .dest = _dest, .flags_seq_tag = _fst }
/* we have a route for EID 8 only */
static const struct mctp_rx_input_test mctp_rx_input_tests[] = {
{ .hdr = RX_HDR(1, 10, 8, 0), .input = true },
{ .hdr = RX_HDR(1, 10, 9, 0), .input = false }, /* no input route */
{ .hdr = RX_HDR(2, 10, 8, 0), .input = false }, /* invalid version */
};
static void mctp_rx_input_test_to_desc(const struct mctp_rx_input_test *t,
char *desc)
{
sprintf(desc, "{%x,%x,%x,%x}", t->hdr.ver, t->hdr.src, t->hdr.dest,
t->hdr.flags_seq_tag);
}
KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
mctp_rx_input_test_to_desc);
/* set up a local dev, route on EID 8, and a socket listening on type 0 */
static void __mctp_route_test_init(struct kunit *test,
struct mctp_test_dev **devp,
struct mctp_test_route **rtp,
struct socket **sockp)
{
struct sockaddr_mctp addr = {0};
struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct socket *sock;
int rc;
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
KUNIT_ASSERT_EQ(test, rc, 0);
addr.smctp_family = AF_MCTP;
addr.smctp_network = MCTP_NET_ANY;
addr.smctp_addr.s_addr = 8;
addr.smctp_type = 0;
rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
KUNIT_ASSERT_EQ(test, rc, 0);
*rtp = rt;
*devp = dev;
*sockp = sock;
}
static void __mctp_route_test_fini(struct kunit *test,
struct mctp_test_dev *dev,
struct mctp_test_route *rt,
struct socket *sock)
{
sock_release(sock);
mctp_test_route_destroy(test, rt);
mctp_test_destroy_dev(dev);
}
struct mctp_route_input_sk_test {
struct mctp_hdr hdr;
u8 type;
bool deliver;
};
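/* Route a single-packet message towards a socket bound to message type
* 0 and check that it is delivered (or not) as expected.
*/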
static void mctp_test_route_input_sk(struct kunit *test)
{
const struct mctp_route_input_sk_test *params;
struct sk_buff *skb, *skb2;
struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct socket *sock;
int rc;
params = test->param_value;
__mctp_route_test_init(test, &dev, &rt, &sock);
skb = mctp_test_create_skb_data(¶ms->hdr, ¶ms->type);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
skb->dev = dev->ndev;
__mctp_cb(skb);
rc = mctp_route_input(&rt->rt, skb);
if (params->deliver) {
KUNIT_EXPECT_EQ(test, rc, 0);
skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
KUNIT_EXPECT_EQ(test, skb2->len, 1);
skb_free_datagram(sock->sk, skb2);
} else {
KUNIT_EXPECT_NE(test, rc, 0);
skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
KUNIT_EXPECT_NULL(test, skb2);
}
__mctp_route_test_fini(test, dev, rt, sock);
}
#define FL_S (MCTP_HDR_FLAG_SOM)
#define FL_E (MCTP_HDR_FLAG_EOM)
#define FL_TO (MCTP_HDR_FLAG_TO)
#define FL_T(t) ((t) & MCTP_HDR_TAG_MASK)
static const struct mctp_route_input_sk_test mctp_route_input_sk_tests[] = {
{ .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO), .type = 0, .deliver = true },
{ .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO), .type = 1, .deliver = false },
{ .hdr = RX_HDR(1, 10, 8, FL_S | FL_E), .type = 0, .deliver = false },
{ .hdr = RX_HDR(1, 10, 8, FL_E | FL_TO), .type = 0, .deliver = false },
{ .hdr = RX_HDR(1, 10, 8, FL_TO), .type = 0, .deliver = false },
{ .hdr = RX_HDR(1, 10, 8, 0), .type = 0, .deliver = false },
};
static void mctp_route_input_sk_to_desc(const struct mctp_route_input_sk_test *t,
char *desc)
{
sprintf(desc, "{%x,%x,%x,%x} type %d", t->hdr.ver, t->hdr.src,
t->hdr.dest, t->hdr.flags_seq_tag, t->type);
}
KUNIT_ARRAY_PARAM(mctp_route_input_sk, mctp_route_input_sk_tests,
mctp_route_input_sk_to_desc);
struct mctp_route_input_sk_reasm_test {
const char *name;
struct mctp_hdr hdrs[4];
int n_hdrs;
int rx_len;
};
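/* Feed a sequence of fragments through mctp_route_input() and check the
* reassembled message length; an rx_len of 0 means no delivery expected.
*/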
static void mctp_test_route_input_sk_reasm(struct kunit *test)
{
const struct mctp_route_input_sk_reasm_test *params;
struct sk_buff *skb, *skb2;
struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct socket *sock;
int i, rc;
u8 c;
params = test->param_value;
__mctp_route_test_init(test, &dev, &rt, &sock);
for (i = 0; i < params->n_hdrs; i++) {
c = i;
skb = mctp_test_create_skb_data(¶ms->hdrs[i], &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
skb->dev = dev->ndev;
__mctp_cb(skb);
rc = mctp_route_input(&rt->rt, skb);
}
skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
if (params->rx_len) {
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
KUNIT_EXPECT_EQ(test, skb2->len, params->rx_len);
skb_free_datagram(sock->sk, skb2);
} else {
KUNIT_EXPECT_NULL(test, skb2);
}
__mctp_route_test_fini(test, dev, rt, sock);
}
#define RX_FRAG(f, s) RX_HDR(1, 10, 8, FL_TO | (f) | ((s) << MCTP_HDR_SEQ_SHIFT))
static const struct mctp_route_input_sk_reasm_test mctp_route_input_sk_reasm_tests[] = {
{
.name = "single packet",
.hdrs = {
RX_FRAG(FL_S | FL_E, 0),
},
.n_hdrs = 1,
.rx_len = 1,
},
{
.name = "single packet, offset seq",
.hdrs = {
RX_FRAG(FL_S | FL_E, 1),
},
.n_hdrs = 1,
.rx_len = 1,
},
{
.name = "start & end packets",
.hdrs = {
RX_FRAG(FL_S, 0),
RX_FRAG(FL_E, 1),
},
.n_hdrs = 2,
.rx_len = 2,
},
{
.name = "start & end packets, offset seq",
.hdrs = {
RX_FRAG(FL_S, 1),
RX_FRAG(FL_E, 2),
},
.n_hdrs = 2,
.rx_len = 2,
},
{
.name = "start & end packets, out of order",
.hdrs = {
RX_FRAG(FL_E, 1),
RX_FRAG(FL_S, 0),
},
.n_hdrs = 2,
.rx_len = 0,
},
{
.name = "start, middle & end packets",
.hdrs = {
RX_FRAG(FL_S, 0),
RX_FRAG(0, 1),
RX_FRAG(FL_E, 2),
},
.n_hdrs = 3,
.rx_len = 3,
},
{
.name = "missing seq",
.hdrs = {
RX_FRAG(FL_S, 0),
RX_FRAG(FL_E, 2),
},
.n_hdrs = 2,
.rx_len = 0,
},
{
.name = "seq wrap",
.hdrs = {
RX_FRAG(FL_S, 3),
RX_FRAG(FL_E, 0),
},
.n_hdrs = 2,
.rx_len = 2,
},
};
static void mctp_route_input_sk_reasm_to_desc(
const struct mctp_route_input_sk_reasm_test *t,
char *desc)
{
sprintf(desc, "%s", t->name);
}
KUNIT_ARRAY_PARAM(mctp_route_input_sk_reasm, mctp_route_input_sk_reasm_tests,
mctp_route_input_sk_reasm_to_desc);
struct mctp_route_input_sk_keys_test {
const char *name;
mctp_eid_t key_peer_addr;
mctp_eid_t key_local_addr;
u8 key_tag;
struct mctp_hdr hdr;
bool deliver;
};
/* test packet rx in the presence of various key configurations */
static void mctp_test_route_input_sk_keys(struct kunit *test)
{
const struct mctp_route_input_sk_keys_test *params;
struct mctp_test_route *rt;
struct sk_buff *skb, *skb2;
struct mctp_test_dev *dev;
struct mctp_sk_key *key;
struct netns_mctp *mns;
struct mctp_sock *msk;
struct socket *sock;
unsigned long flags;
int rc;
u8 c;
params = test->param_value;
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
KUNIT_ASSERT_EQ(test, rc, 0);
msk = container_of(sock->sk, struct mctp_sock, sk);
mns = &sock_net(sock->sk)->mctp;
/* set the incoming tag according to test params */
key = mctp_key_alloc(msk, params->key_local_addr, params->key_peer_addr,
params->key_tag, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, key);
spin_lock_irqsave(&mns->keys_lock, flags);
mctp_reserve_tag(&init_net, key, msk);
spin_unlock_irqrestore(&mns->keys_lock, flags);
/* create packet and route */
c = 0;
skb = mctp_test_create_skb_data(¶ms->hdr, &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
skb->dev = dev->ndev;
__mctp_cb(skb);
rc = mctp_route_input(&rt->rt, skb);
/* (potentially) receive message */
skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
if (params->deliver)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
else
KUNIT_EXPECT_PTR_EQ(test, skb2, NULL);
if (skb2)
skb_free_datagram(sock->sk, skb2);
mctp_key_unref(key);
__mctp_route_test_fini(test, dev, rt, sock);
}
static const struct mctp_route_input_sk_keys_test mctp_route_input_sk_keys_tests[] = {
{
.name = "direct match",
.key_peer_addr = 9,
.key_local_addr = 8,
.key_tag = 1,
.hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1)),
.deliver = true,
},
{
.name = "flipped src/dest",
.key_peer_addr = 8,
.key_local_addr = 9,
.key_tag = 1,
.hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1)),
.deliver = false,
},
{
.name = "peer addr mismatch",
.key_peer_addr = 9,
.key_local_addr = 8,
.key_tag = 1,
.hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_T(1)),
.deliver = false,
},
{
.name = "tag value mismatch",
.key_peer_addr = 9,
.key_local_addr = 8,
.key_tag = 1,
.hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(2)),
.deliver = false,
},
{
.name = "TO mismatch",
.key_peer_addr = 9,
.key_local_addr = 8,
.key_tag = 1,
.hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1) | FL_TO),
.deliver = false,
},
{
.name = "broadcast response",
.key_peer_addr = MCTP_ADDR_ANY,
.key_local_addr = 8,
.key_tag = 1,
.hdr = RX_HDR(1, 11, 8, FL_S | FL_E | FL_T(1)),
.deliver = true,
},
{
.name = "any local match",
.key_peer_addr = 12,
.key_local_addr = MCTP_ADDR_ANY,
.key_tag = 1,
.hdr = RX_HDR(1, 12, 8, FL_S | FL_E | FL_T(1)),
.deliver = true,
},
};
static void mctp_route_input_sk_keys_to_desc(
const struct mctp_route_input_sk_keys_test *t,
char *desc)
{
sprintf(desc, "%s", t->name);
}
KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests,
mctp_route_input_sk_keys_to_desc);
static struct kunit_case mctp_test_cases[] = {
KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params),
KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk, mctp_route_input_sk_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_reasm,
mctp_route_input_sk_reasm_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
mctp_route_input_sk_keys_gen_params),
{}
};
static struct kunit_suite mctp_test_suite = {
.name = "mctp",
.test_cases = mctp_test_cases,
};
kunit_test_suite(mctp_test_suite);
| linux-master | net/mctp/test/route-test.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/mctp.h>
#include <linux/if_arp.h>
#include <net/mctpdevice.h>
#include <net/pkt_sched.h>
#include "utils.h"
static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
struct net_device *ndev)
{
kfree_skb(skb);
return NETDEV_TX_OK;
}
static const struct net_device_ops mctp_test_netdev_ops = {
.ndo_start_xmit = mctp_test_dev_tx,
};
static void mctp_test_dev_setup(struct net_device *ndev)
{
ndev->type = ARPHRD_MCTP;
ndev->mtu = MCTP_DEV_TEST_MTU;
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
ndev->flags = IFF_NOARP;
ndev->netdev_ops = &mctp_test_netdev_ops;
ndev->needs_free_netdev = true;
}
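/* Allocate and register a minimal MCTP-capable netdev for tests, and
* hand back the attached struct mctp_dev alongside it.
*/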
struct mctp_test_dev *mctp_test_create_dev(void)
{
struct mctp_test_dev *dev;
struct net_device *ndev;
int rc;
ndev = alloc_netdev(sizeof(*dev), "mctptest%d", NET_NAME_ENUM,
mctp_test_dev_setup);
if (!ndev)
return NULL;
dev = netdev_priv(ndev);
dev->ndev = ndev;
rc = register_netdev(ndev);
if (rc) {
free_netdev(ndev);
return NULL;
}
rcu_read_lock();
dev->mdev = __mctp_dev_get(ndev);
rcu_read_unlock();
return dev;
}
void mctp_test_destroy_dev(struct mctp_test_dev *dev)
{
mctp_dev_put(dev->mdev);
unregister_netdev(dev->ndev);
}
| linux-master | net/mctp/test/utils.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Null security operations.
*
* Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <net/af_rxrpc.h>
#include "ar-internal.h"
static int none_init_connection_security(struct rxrpc_connection *conn,
struct rxrpc_key_token *token)
{
return 0;
}
/*
* Work out how much data we can put in an unsecured packet.
*/
static int none_how_much_data(struct rxrpc_call *call, size_t remain,
size_t *_buf_size, size_t *_data_size, size_t *_offset)
{
*_buf_size = *_data_size = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
*_offset = 0;
return 0;
}
static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
return 0;
}
static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
sp->flags |= RXRPC_RX_VERIFIED;
return 0;
}
static void none_free_call_crypto(struct rxrpc_call *call)
{
}
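/* Challenge/response packets are never expected on an unsecured
* connection; treat them as protocol errors and abort the connection.
*/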
static int none_respond_to_challenge(struct rxrpc_connection *conn,
struct sk_buff *skb)
{
return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
rxrpc_eproto_rxnull_challenge);
}
static int none_verify_response(struct rxrpc_connection *conn,
struct sk_buff *skb)
{
return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
rxrpc_eproto_rxnull_response);
}
static void none_clear(struct rxrpc_connection *conn)
{
}
static int none_init(void)
{
return 0;
}
static void none_exit(void)
{
}
/*
* RxRPC null security - no encryption or authentication
*/
const struct rxrpc_security rxrpc_no_security = {
.name = "none",
.security_index = RXRPC_SECURITY_NONE,
.init = none_init,
.exit = none_exit,
.init_connection_security = none_init_connection_security,
.free_call_crypto = none_free_call_crypto,
.how_much_data = none_how_much_data,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
.respond_to_challenge = none_respond_to_challenge,
.verify_response = none_verify_response,
.clear = none_clear,
};
| linux-master | net/rxrpc/insecure.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
unsigned long user_call_ID)
{
}
/*
* Preallocate a single service call, connection and peer and, if possible,
* give them a user ID and attach the user's side of the ID to them.
*/
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
struct rxrpc_backlog *b,
rxrpc_notify_rx_t notify_rx,
rxrpc_user_attach_call_t user_attach_call,
unsigned long user_call_ID, gfp_t gfp,
unsigned int debug_id)
{
struct rxrpc_call *call, *xcall;
struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
struct rb_node *parent, **pp;
int max, tmp;
unsigned int size = RXRPC_BACKLOG_MAX;
unsigned int head, tail, call_head, call_tail;
max = rx->sk.sk_max_ack_backlog;
tmp = rx->sk.sk_ack_backlog;
if (tmp >= max) {
_leave(" = -ENOBUFS [full %u]", max);
return -ENOBUFS;
}
max -= tmp;
/* We don't need more conns and peers than we have calls, but on the
* other hand, we shouldn't ever use more peers than conns or conns
* than calls.
*/
call_head = b->call_backlog_head;
call_tail = READ_ONCE(b->call_backlog_tail);
tmp = CIRC_CNT(call_head, call_tail, size);
if (tmp >= max) {
_leave(" = -ENOBUFS [enough %u]", tmp);
return -ENOBUFS;
}
max = tmp + 1;
head = b->peer_backlog_head;
tail = READ_ONCE(b->peer_backlog_tail);
if (CIRC_CNT(head, tail, size) < max) {
struct rxrpc_peer *peer;
peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
if (!peer)
return -ENOMEM;
b->peer_backlog[head] = peer;
smp_store_release(&b->peer_backlog_head,
(head + 1) & (size - 1));
}
head = b->conn_backlog_head;
tail = READ_ONCE(b->conn_backlog_tail);
if (CIRC_CNT(head, tail, size) < max) {
struct rxrpc_connection *conn;
conn = rxrpc_prealloc_service_connection(rxnet, gfp);
if (!conn)
return -ENOMEM;
b->conn_backlog[head] = conn;
smp_store_release(&b->conn_backlog_head,
(head + 1) & (size - 1));
}
/* Now it gets complicated, because calls get registered with the
* socket here, with a user ID preassigned by the user.
*/
call = rxrpc_alloc_call(rx, gfp, debug_id);
if (!call)
return -ENOMEM;
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
user_call_ID, rxrpc_call_new_prealloc_service);
write_lock(&rx->call_lock);
/* Check the user ID isn't already in use */
pp = &rx->calls.rb_node;
parent = NULL;
while (*pp) {
parent = *pp;
xcall = rb_entry(parent, struct rxrpc_call, sock_node);
if (user_call_ID < xcall->user_call_ID)
pp = &(*pp)->rb_left;
else if (user_call_ID > xcall->user_call_ID)
pp = &(*pp)->rb_right;
else
goto id_in_use;
}
call->user_call_ID = user_call_ID;
call->notify_rx = notify_rx;
if (user_attach_call) {
rxrpc_get_call(call, rxrpc_call_get_kernel_service);
user_attach_call(call, user_call_ID);
}
rxrpc_get_call(call, rxrpc_call_get_userid);
rb_link_node(&call->sock_node, parent, pp);
rb_insert_color(&call->sock_node, &rx->calls);
set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
list_add(&call->sock_link, &rx->sock_calls);
write_unlock(&rx->call_lock);
rxnet = call->rxnet;
spin_lock(&rxnet->call_lock);
list_add_tail_rcu(&call->link, &rxnet->calls);
spin_unlock(&rxnet->call_lock);
b->call_backlog[call_head] = call;
smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
return 0;
id_in_use:
write_unlock(&rx->call_lock);
rxrpc_cleanup_call(call);
_leave(" = -EBADSLT");
return -EBADSLT;
}
/*
* Allocate the preallocation buffers for incoming service calls. These must
* be charged manually.
*/
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
struct rxrpc_backlog *b = rx->backlog;
if (!b) {
b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
if (!b)
return -ENOMEM;
rx->backlog = b;
}
return 0;
}
/*
* Discard the preallocation on a service.
*/
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
struct rxrpc_backlog *b = rx->backlog;
struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
unsigned int size = RXRPC_BACKLOG_MAX, head, tail;
if (!b)
return;
rx->backlog = NULL;
/* Make sure that there aren't any incoming calls in progress before we
* clear the preallocation buffers.
*/
spin_lock(&rx->incoming_lock);
spin_unlock(&rx->incoming_lock);
head = b->peer_backlog_head;
tail = b->peer_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_peer *peer = b->peer_backlog[tail];
rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_peer);
kfree(peer);
tail = (tail + 1) & (size - 1);
}
head = b->conn_backlog_head;
tail = b->conn_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_connection *conn = b->conn_backlog[tail];
write_lock(&rxnet->conn_lock);
list_del(&conn->link);
list_del(&conn->proc_link);
write_unlock(&rxnet->conn_lock);
kfree(conn);
if (atomic_dec_and_test(&rxnet->nr_conns))
wake_up_var(&rxnet->nr_conns);
tail = (tail + 1) & (size - 1);
}
head = b->call_backlog_head;
tail = b->call_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_call *call = b->call_backlog[tail];
rcu_assign_pointer(call->socket, rx);
if (rx->discard_new_call) {
_debug("discard %lx", call->user_call_ID);
rx->discard_new_call(call, call->user_call_ID);
if (call->notify_rx)
call->notify_rx = rxrpc_dummy_notify;
rxrpc_put_call(call, rxrpc_call_put_kernel);
}
rxrpc_call_completed(call);
rxrpc_release_call(rx, call);
rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
tail = (tail + 1) & (size - 1);
}
kfree(b);
}
/*
* Allocate a new incoming call from the prealloc pool, along with a connection
* and a peer as necessary.
*/
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_local *local,
struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
const struct rxrpc_security *sec,
struct sockaddr_rxrpc *peer_srx,
struct sk_buff *skb)
{
struct rxrpc_backlog *b = rx->backlog;
struct rxrpc_call *call;
unsigned short call_head, conn_head, peer_head;
unsigned short call_tail, conn_tail, peer_tail;
unsigned short call_count, conn_count;
/* #calls >= #conns >= #peers must hold true. */
call_head = smp_load_acquire(&b->call_backlog_head);
call_tail = b->call_backlog_tail;
call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
conn_head = smp_load_acquire(&b->conn_backlog_head);
conn_tail = b->conn_backlog_tail;
conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
ASSERTCMP(conn_count, >=, call_count);
peer_head = smp_load_acquire(&b->peer_backlog_head);
peer_tail = b->peer_backlog_tail;
ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
conn_count);
if (call_count == 0)
return NULL;
if (!conn) {
if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
peer = NULL;
if (!peer) {
peer = b->peer_backlog[peer_tail];
peer->srx = *peer_srx;
b->peer_backlog[peer_tail] = NULL;
smp_store_release(&b->peer_backlog_tail,
(peer_tail + 1) &
(RXRPC_BACKLOG_MAX - 1));
rxrpc_new_incoming_peer(local, peer);
}
/* Now allocate and set up the connection */
conn = b->conn_backlog[conn_tail];
b->conn_backlog[conn_tail] = NULL;
smp_store_release(&b->conn_backlog_tail,
(conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
conn->peer = peer;
rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn);
rxrpc_new_incoming_connection(rx, conn, sec, skb);
} else {
rxrpc_get_connection(conn, rxrpc_conn_get_service_conn);
atomic_inc(&conn->active);
}
/* And now we can allocate and set up a new call */
call = b->call_backlog[call_tail];
b->call_backlog[call_tail] = NULL;
smp_store_release(&b->call_backlog_tail,
(call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
rxrpc_see_call(call, rxrpc_call_see_accept);
call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call);
call->conn = conn;
call->security = conn->security;
call->security_ix = conn->security_ix;
call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
call->dest_srx = peer->srx;
call->cong_ssthresh = call->peer->cong_ssthresh;
call->tx_last_sent = ktime_get_real();
return call;
}
/*
* Set up a new incoming call. Called from the I/O thread.
*
* If this is for a kernel service, when we allocate the call, it will have
* three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
* retainer ref obtained from the backlog buffer. Prealloc calls for userspace
* services only have the ref from the backlog buffer.
*
* If we want to report an error, we mark the skb with the packet type and
* abort code and return false.
*/
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
struct sockaddr_rxrpc *peer_srx,
struct sk_buff *skb)
{
const struct rxrpc_security *sec = NULL;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_call *call = NULL;
struct rxrpc_sock *rx;
_enter("");
/* Don't set up a call for anything other than a DATA packet. */
if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
read_lock(&local->services_lock);
/* Weed out packets to services we're not offering. Packets that would
* begin a call are explicitly rejected and the rest are just
* discarded.
*/
rx = local->service;
if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
sp->hdr.serviceId != rx->second_service)
) {
if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
sp->hdr.seq == 1)
goto unsupported_service;
goto discard;
}
if (!conn) {
sec = rxrpc_get_incoming_security(rx, skb);
if (!sec)
goto unsupported_security;
}
spin_lock(&rx->incoming_lock);
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
rx->sk.sk_state == RXRPC_CLOSE) {
rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
RX_INVALID_OPERATION, -ESHUTDOWN);
goto no_call;
}
call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, peer_srx,
skb);
if (!call) {
skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
goto no_call;
}
trace_rxrpc_receive(call, rxrpc_receive_incoming,
sp->hdr.serial, sp->hdr.seq);
/* Make the call live. */
rxrpc_incoming_call(rx, call, skb);
conn = call->conn;
if (rx->notify_new_call)
rx->notify_new_call(&rx->sk, call, call->user_call_ID);
spin_lock(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge);
}
spin_unlock(&conn->state_lock);
spin_unlock(&rx->incoming_lock);
read_unlock(&local->services_lock);
if (hlist_unhashed(&call->error_link)) {
spin_lock(&call->peer->lock);
hlist_add_head(&call->error_link, &call->peer->error_targets);
spin_unlock(&call->peer->lock);
}
_leave(" = %p{%d}", call, call->debug_id);
rxrpc_input_call_event(call, skb);
rxrpc_put_call(call, rxrpc_call_put_input);
return true;
unsupported_service:
read_unlock(&local->services_lock);
return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
RX_INVALID_OPERATION, -EOPNOTSUPP);
unsupported_security:
read_unlock(&local->services_lock);
return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
RX_INVALID_OPERATION, -EKEYREJECTED);
no_call:
spin_unlock(&rx->incoming_lock);
read_unlock(&local->services_lock);
_leave(" = f [%u]", skb->mark);
return false;
discard:
read_unlock(&local->services_lock);
return true;
}
/*
* Charge up socket with preallocated calls, attaching user call IDs.
*/
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
struct rxrpc_backlog *b = rx->backlog;
if (rx->sk.sk_state == RXRPC_CLOSE)
return -ESHUTDOWN;
return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
GFP_KERNEL,
atomic_inc_return(&rxrpc_debug_id));
}
/*
* rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
* @sock: The socket on which to preallocate
* @notify_rx: Event notification function for the call
* @user_attach_call: Func to attach call to user_call_ID
* @user_call_ID: The tag to attach to the preallocated call
* @gfp: The allocation conditions.
* @debug_id: The tracing debug ID.
*
* Charge up the socket with preallocated calls, each with a user ID. A
* function should be provided to effect the attachment from the user's side.
* The user is given a ref to hold on the call.
*
* Note that the call may become connected before this function returns.
*/
int rxrpc_kernel_charge_accept(struct socket *sock,
rxrpc_notify_rx_t notify_rx,
rxrpc_user_attach_call_t user_attach_call,
unsigned long user_call_ID, gfp_t gfp,
unsigned int debug_id)
{
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct rxrpc_backlog *b = rx->backlog;
if (sock->sk->sk_state == RXRPC_CLOSE)
return -ESHUTDOWN;
return rxrpc_service_prealloc_one(rx, b, notify_rx,
user_attach_call, user_call_ID,
gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
| linux-master | net/rxrpc/call_accept.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet reception
*
* Copyright (C) 2007, 2016, 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ar-internal.h"
static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
struct sockaddr_rxrpc *peer_srx,
struct sk_buff *skb);
/*
* handle data received on the local endpoint
* - may be called in interrupt context
*
* [!] Note that as this is called from the encap_rcv hook, the socket is not
* held locked by the caller and nothing prevents sk_user_data on the UDP socket from
* being cleared in the middle of processing this function.
*
* Called with the RCU read lock held from the IP layer via UDP.
*/
int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
{
struct sk_buff_head *rx_queue;
struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
if (unlikely(!local)) {
kfree_skb(skb);
return 0;
}
if (skb->tstamp == 0)
skb->tstamp = ktime_get_real();
skb->mark = RXRPC_SKB_MARK_PACKET;
rxrpc_new_skb(skb, rxrpc_skb_new_encap_rcv);
rx_queue = &local->rx_queue;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
if (rxrpc_inject_rx_delay ||
!skb_queue_empty(&local->rx_delay_queue)) {
skb->tstamp = ktime_add_ms(skb->tstamp, rxrpc_inject_rx_delay);
rx_queue = &local->rx_delay_queue;
}
#endif
skb_queue_tail(rx_queue, skb);
rxrpc_wake_up_io_thread(local);
return 0;
}
/*
* Handle an error received on the local endpoint.
*/
void rxrpc_error_report(struct sock *sk)
{
struct rxrpc_local *local;
struct sk_buff *skb;
rcu_read_lock();
local = rcu_dereference_sk_user_data(sk);
if (unlikely(!local)) {
rcu_read_unlock();
return;
}
while ((skb = skb_dequeue(&sk->sk_error_queue))) {
skb->mark = RXRPC_SKB_MARK_ERROR;
rxrpc_new_skb(skb, rxrpc_skb_new_error_report);
skb_queue_tail(&local->rx_queue, skb);
}
rxrpc_wake_up_io_thread(local);
rcu_read_unlock();
}
/*
* Directly produce an abort from a packet.
*/
bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
s32 abort_code, int err)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
abort_code, err);
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb->priority = abort_code;
return false;
}
static bool rxrpc_bad_message(struct sk_buff *skb, enum rxrpc_abort_reason why)
{
return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EBADMSG);
}
#define just_discard true
/*
* Process event packets targeted at a local endpoint.
*/
static bool rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
char v;
_enter("");
rxrpc_see_skb(skb, rxrpc_skb_see_version);
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), &v, 1) >= 0) {
if (v == 0)
rxrpc_send_version_request(local, &sp->hdr, skb);
}
return true;
}
/*
* Extract the wire header from a packet and translate the byte order.
*/
static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
struct sk_buff *skb)
{
struct rxrpc_wire_header whdr;
/* dig out the RxRPC connection details */
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
return rxrpc_bad_message(skb, rxrpc_badmsg_short_hdr);
memset(sp, 0, sizeof(*sp));
sp->hdr.epoch = ntohl(whdr.epoch);
sp->hdr.cid = ntohl(whdr.cid);
sp->hdr.callNumber = ntohl(whdr.callNumber);
sp->hdr.seq = ntohl(whdr.seq);
sp->hdr.serial = ntohl(whdr.serial);
sp->hdr.flags = whdr.flags;
sp->hdr.type = whdr.type;
sp->hdr.userStatus = whdr.userStatus;
sp->hdr.securityIndex = whdr.securityIndex;
sp->hdr._rsvd = ntohs(whdr._rsvd);
sp->hdr.serviceId = ntohs(whdr.serviceId);
return true;
}
/*
* Extract the abort code from an ABORT packet and stash it in skb->priority.
*/
static bool rxrpc_extract_abort(struct sk_buff *skb)
{
__be32 wtmp;
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&wtmp, sizeof(wtmp)) < 0)
return false;
skb->priority = ntohl(wtmp);
return true;
}
/*
* Process packets received on the local endpoint
*/
static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
{
struct rxrpc_connection *conn;
struct sockaddr_rxrpc peer_srx;
struct rxrpc_skb_priv *sp;
struct rxrpc_peer *peer = NULL;
struct sk_buff *skb = *_skb;
bool ret = false;
skb_pull(skb, sizeof(struct udphdr));
sp = rxrpc_skb(skb);
/* dig out the RxRPC connection details */
if (!rxrpc_extract_header(sp, skb))
return just_discard;
if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
static int lose;
if ((lose++ & 7) == 7) {
trace_rxrpc_rx_lose(sp);
return just_discard;
}
}
trace_rxrpc_rx_packet(sp);
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
if (rxrpc_to_client(sp))
return just_discard;
return rxrpc_input_version(local, skb);
case RXRPC_PACKET_TYPE_BUSY:
if (rxrpc_to_server(sp))
return just_discard;
fallthrough;
case RXRPC_PACKET_TYPE_ACK:
case RXRPC_PACKET_TYPE_ACKALL:
if (sp->hdr.callNumber == 0)
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
break;
case RXRPC_PACKET_TYPE_ABORT:
if (!rxrpc_extract_abort(skb))
return just_discard; /* Just discard if malformed */
break;
case RXRPC_PACKET_TYPE_DATA:
if (sp->hdr.callNumber == 0)
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
if (sp->hdr.seq == 0)
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
/* Unshare the packet so that it can be modified for in-place
* decryption.
*/
if (sp->hdr.securityIndex != 0) {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) {
rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
*_skb = NULL;
return just_discard;
}
if (skb != *_skb) {
rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare);
*_skb = skb;
rxrpc_new_skb(skb, rxrpc_skb_new_unshared);
sp = rxrpc_skb(skb);
}
}
break;
case RXRPC_PACKET_TYPE_CHALLENGE:
if (rxrpc_to_server(sp))
return just_discard;
break;
case RXRPC_PACKET_TYPE_RESPONSE:
if (rxrpc_to_client(sp))
return just_discard;
break;
/* Packet types 9-11 should just be ignored. */
case RXRPC_PACKET_TYPE_PARAMS:
case RXRPC_PACKET_TYPE_10:
case RXRPC_PACKET_TYPE_11:
return just_discard;
default:
return rxrpc_bad_message(skb, rxrpc_badmsg_unsupported_packet);
}
if (sp->hdr.serviceId == 0)
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_service);
if (WARN_ON_ONCE(rxrpc_extract_addr_from_skb(&peer_srx, skb) < 0))
return just_discard; /* Unsupported address type. */
if (peer_srx.transport.family != local->srx.transport.family &&
(peer_srx.transport.family == AF_INET &&
local->srx.transport.family != AF_INET6)) {
pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
peer_srx.transport.family,
local->srx.transport.family);
return just_discard; /* Wrong address type. */
}
if (rxrpc_to_client(sp)) {
rcu_read_lock();
conn = rxrpc_find_client_connection_rcu(local, &peer_srx, skb);
conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
rcu_read_unlock();
if (!conn)
return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_conn);
ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
rxrpc_put_connection(conn, rxrpc_conn_put_call_input);
return ret;
}
/* We need to look up service connections by the full protocol
* parameter set. We look up the peer first as an intermediate step
* and then the connection from the peer's tree.
*/
rcu_read_lock();
peer = rxrpc_lookup_peer_rcu(local, &peer_srx);
if (!peer) {
rcu_read_unlock();
return rxrpc_new_incoming_call(local, NULL, NULL, &peer_srx, skb);
}
conn = rxrpc_find_service_conn_rcu(peer, skb);
conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
if (conn) {
rcu_read_unlock();
ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
rxrpc_put_connection(conn, rxrpc_conn_put_call_input);
return ret;
}
peer = rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input);
rcu_read_unlock();
ret = rxrpc_new_incoming_call(local, peer, NULL, &peer_srx, skb);
rxrpc_put_peer(peer, rxrpc_peer_put_input);
return ret;
}
/*
* Deal with a packet that's associated with an extant connection.
*/
static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
struct sockaddr_rxrpc *peer_srx,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_channel *chan;
struct rxrpc_call *call = NULL;
unsigned int channel;
bool ret;
if (sp->hdr.securityIndex != conn->security_ix)
return rxrpc_direct_abort(skb, rxrpc_eproto_wrong_security,
RXKADINCONSISTENCY, -EBADMSG);
if (sp->hdr.serviceId != conn->service_id) {
int old_id;
if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
return rxrpc_protocol_error(skb, rxrpc_eproto_reupgrade);
old_id = cmpxchg(&conn->service_id, conn->orig_service_id,
sp->hdr.serviceId);
if (old_id != conn->orig_service_id &&
old_id != sp->hdr.serviceId)
return rxrpc_protocol_error(skb, rxrpc_eproto_bad_upgrade);
}
if (after(sp->hdr.serial, conn->hi_serial))
conn->hi_serial = sp->hdr.serial;
/* It's a connection-level packet if the call number is 0. */
if (sp->hdr.callNumber == 0)
return rxrpc_input_conn_packet(conn, skb);
/* Call-bound packets are routed by connection channel. */
channel = sp->hdr.cid & RXRPC_CHANNELMASK;
chan = &conn->channels[channel];
/* Ignore really old calls */
if (sp->hdr.callNumber < chan->last_call)
return just_discard;
if (sp->hdr.callNumber == chan->last_call) {
if (chan->call ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
return just_discard;
/* For the previous service call, if completed successfully, we
* discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
chan->last_type == RXRPC_PACKET_TYPE_ACK)
return just_discard;
/* But otherwise we need to retransmit the final packet from
* data cached in the connection record.
*/
if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
trace_rxrpc_rx_data(chan->call_debug_id,
sp->hdr.seq,
sp->hdr.serial,
sp->hdr.flags);
rxrpc_conn_retransmit_call(conn, skb, channel);
return just_discard;
}
call = rxrpc_try_get_call(chan->call, rxrpc_call_get_input);
if (sp->hdr.callNumber > chan->call_id) {
if (rxrpc_to_client(sp)) {
rxrpc_put_call(call, rxrpc_call_put_input);
return rxrpc_protocol_error(skb,
rxrpc_eproto_unexpected_implicit_end);
}
if (call) {
rxrpc_implicit_end_call(call, skb);
rxrpc_put_call(call, rxrpc_call_put_input);
call = NULL;
}
}
if (!call) {
if (rxrpc_to_client(sp))
return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_call);
return rxrpc_new_incoming_call(conn->local, conn->peer, conn,
peer_srx, skb);
}
ret = rxrpc_input_call_event(call, skb);
rxrpc_put_call(call, rxrpc_call_put_input);
return ret;
}
/*
* I/O and event handling thread.
*/
int rxrpc_io_thread(void *data)
{
struct rxrpc_connection *conn;
struct sk_buff_head rx_queue;
struct rxrpc_local *local = data;
struct rxrpc_call *call;
struct sk_buff *skb;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
ktime_t now;
#endif
bool should_stop;
complete(&local->io_thread_ready);
skb_queue_head_init(&rx_queue);
set_user_nice(current, MIN_NICE);
for (;;) {
rxrpc_inc_stat(local->rxnet, stat_io_loop);
/* Deal with connections that want immediate attention. */
conn = list_first_entry_or_null(&local->conn_attend_q,
struct rxrpc_connection,
attend_link);
if (conn) {
spin_lock_bh(&local->lock);
list_del_init(&conn->attend_link);
spin_unlock_bh(&local->lock);
rxrpc_input_conn_event(conn, NULL);
rxrpc_put_connection(conn, rxrpc_conn_put_poke);
continue;
}
if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
&local->client_conn_flags))
rxrpc_discard_expired_client_conns(local);
/* Deal with calls that want immediate attention. */
if ((call = list_first_entry_or_null(&local->call_attend_q,
struct rxrpc_call,
attend_link))) {
spin_lock_bh(&local->lock);
list_del_init(&call->attend_link);
spin_unlock_bh(&local->lock);
trace_rxrpc_call_poked(call);
rxrpc_input_call_event(call, NULL);
rxrpc_put_call(call, rxrpc_call_put_poke);
continue;
}
if (!list_empty(&local->new_client_calls))
rxrpc_connect_client_calls(local);
/* Process received packets and errors. */
if ((skb = __skb_dequeue(&rx_queue))) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
switch (skb->mark) {
case RXRPC_SKB_MARK_PACKET:
skb->priority = 0;
if (!rxrpc_input_packet(local, &skb))
rxrpc_reject_packet(local, skb);
trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_free_skb(skb, rxrpc_skb_put_input);
break;
case RXRPC_SKB_MARK_ERROR:
rxrpc_input_error(local, skb);
rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
break;
case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
rxrpc_input_conn_event(sp->conn, skb);
rxrpc_put_connection(sp->conn, rxrpc_conn_put_poke);
rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured);
break;
default:
WARN_ON_ONCE(1);
rxrpc_free_skb(skb, rxrpc_skb_put_unknown);
break;
}
continue;
}
/* Inject a delay into packets if requested. */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
now = ktime_get_real();
while ((skb = skb_peek(&local->rx_delay_queue))) {
if (ktime_before(now, skb->tstamp))
break;
skb = skb_dequeue(&local->rx_delay_queue);
skb_queue_tail(&local->rx_queue, skb);
}
#endif
if (!skb_queue_empty(&local->rx_queue)) {
spin_lock_irq(&local->rx_queue.lock);
skb_queue_splice_tail_init(&local->rx_queue, &rx_queue);
spin_unlock_irq(&local->rx_queue.lock);
continue;
}
set_current_state(TASK_INTERRUPTIBLE);
should_stop = kthread_should_stop();
if (!skb_queue_empty(&local->rx_queue) ||
!list_empty(&local->call_attend_q) ||
!list_empty(&local->conn_attend_q) ||
!list_empty(&local->new_client_calls) ||
test_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
&local->client_conn_flags)) {
__set_current_state(TASK_RUNNING);
continue;
}
if (should_stop)
break;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
skb = skb_peek(&local->rx_delay_queue);
if (skb) {
unsigned long timeout;
ktime_t tstamp = skb->tstamp;
ktime_t now = ktime_get_real();
s64 delay_ns = ktime_to_ns(ktime_sub(tstamp, now));
if (delay_ns <= 0) {
__set_current_state(TASK_RUNNING);
continue;
}
timeout = nsecs_to_jiffies(delay_ns);
timeout = max(timeout, 1UL);
schedule_timeout(timeout);
__set_current_state(TASK_RUNNING);
continue;
}
#endif
schedule();
}
__set_current_state(TASK_RUNNING);
rxrpc_see_local(local, rxrpc_local_stop);
rxrpc_destroy_local(local);
local->io_thread = NULL;
rxrpc_see_local(local, rxrpc_local_stopped);
return 0;
}
| linux-master | net/rxrpc/io_thread.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Peer event handling, typically ICMP messages.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
static void rxrpc_store_error(struct rxrpc_peer *, struct sk_buff *);
static void rxrpc_distribute_error(struct rxrpc_peer *, struct sk_buff *,
enum rxrpc_call_completion, int);
/*
* Find the peer associated with a local error.
*/
static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
const struct sk_buff *skb,
struct sockaddr_rxrpc *srx)
{
struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
_enter("");
memset(srx, 0, sizeof(*srx));
srx->transport_type = local->srx.transport_type;
srx->transport_len = local->srx.transport_len;
srx->transport.family = local->srx.transport.family;
/* Can we see an ICMP4 packet on an ICMP6 listening socket, and vice
* versa?
*/
switch (srx->transport.family) {
case AF_INET:
srx->transport_len = sizeof(srx->transport.sin);
srx->transport.family = AF_INET;
srx->transport.sin.sin_port = serr->port;
switch (serr->ee.ee_origin) {
case SO_EE_ORIGIN_ICMP:
memcpy(&srx->transport.sin.sin_addr,
skb_network_header(skb) + serr->addr_offset,
sizeof(struct in_addr));
break;
case SO_EE_ORIGIN_ICMP6:
memcpy(&srx->transport.sin.sin_addr,
skb_network_header(skb) + serr->addr_offset + 12,
sizeof(struct in_addr));
break;
default:
memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
sizeof(struct in_addr));
break;
}
break;
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
switch (serr->ee.ee_origin) {
case SO_EE_ORIGIN_ICMP6:
srx->transport.sin6.sin6_port = serr->port;
memcpy(&srx->transport.sin6.sin6_addr,
skb_network_header(skb) + serr->addr_offset,
sizeof(struct in6_addr));
break;
case SO_EE_ORIGIN_ICMP:
srx->transport_len = sizeof(srx->transport.sin);
srx->transport.family = AF_INET;
srx->transport.sin.sin_port = serr->port;
memcpy(&srx->transport.sin.sin_addr,
skb_network_header(skb) + serr->addr_offset,
sizeof(struct in_addr));
break;
default:
memcpy(&srx->transport.sin6.sin6_addr,
&ipv6_hdr(skb)->saddr,
sizeof(struct in6_addr));
break;
}
break;
#endif
default:
BUG();
}
return rxrpc_lookup_peer_rcu(local, srx);
}
/*
* Handle an MTU/fragmentation problem.
*/
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
{
/* wind down the local interface MTU */
if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu)
peer->if_mtu = mtu;
if (mtu == 0) {
/* they didn't give us a size, estimate one */
mtu = peer->if_mtu;
if (mtu > 1500) {
mtu >>= 1;
if (mtu < 1500)
mtu = 1500;
} else {
mtu -= 100;
if (mtu < peer->hdrsize)
mtu = peer->hdrsize + 4;
}
}
if (mtu < peer->mtu) {
spin_lock(&peer->lock);
peer->mtu = mtu;
peer->maxdata = peer->mtu - peer->hdrsize;
spin_unlock(&peer->lock);
}
}
/*
* Handle an error received on the local endpoint.
*/
void rxrpc_input_error(struct rxrpc_local *local, struct sk_buff *skb)
{
struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
struct sockaddr_rxrpc srx;
struct rxrpc_peer *peer = NULL;
_enter("L=%x", local->debug_id);
if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
_leave("UDP empty message");
return;
}
rcu_read_lock();
peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input_error))
peer = NULL;
rcu_read_unlock();
if (!peer)
return;
trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
serr->ee.ee_type == ICMP_DEST_UNREACH &&
serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
rxrpc_adjust_mtu(peer, serr->ee.ee_info);
goto out;
}
rxrpc_store_error(peer, skb);
out:
rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
}
/*
* Map an error report to error codes on the peer record.
*/
static void rxrpc_store_error(struct rxrpc_peer *peer, struct sk_buff *skb)
{
enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
struct sock_extended_err *ee = &serr->ee;
int err = ee->ee_errno;
_enter("");
switch (ee->ee_origin) {
case SO_EE_ORIGIN_NONE:
case SO_EE_ORIGIN_LOCAL:
compl = RXRPC_CALL_LOCAL_ERROR;
break;
case SO_EE_ORIGIN_ICMP6:
if (err == EACCES)
err = EHOSTUNREACH;
fallthrough;
case SO_EE_ORIGIN_ICMP:
default:
break;
}
rxrpc_distribute_error(peer, skb, compl, err);
}
/*
* Distribute an error that occurred on a peer.
*/
static void rxrpc_distribute_error(struct rxrpc_peer *peer, struct sk_buff *skb,
enum rxrpc_call_completion compl, int err)
{
struct rxrpc_call *call;
HLIST_HEAD(error_targets);
spin_lock(&peer->lock);
hlist_move_list(&peer->error_targets, &error_targets);
while (!hlist_empty(&error_targets)) {
call = hlist_entry(error_targets.first,
struct rxrpc_call, error_link);
hlist_del_init(&call->error_link);
spin_unlock(&peer->lock);
rxrpc_see_call(call, rxrpc_call_see_distribute_error);
rxrpc_set_call_completion(call, compl, 0, -err);
rxrpc_input_call_event(call, skb);
spin_lock(&peer->lock);
}
spin_unlock(&peer->lock);
}
/*
* Perform keep-alive pings.
*/
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
struct list_head *collector,
time64_t base,
u8 cursor)
{
struct rxrpc_peer *peer;
const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
time64_t keepalive_at;
bool use;
int slot;
spin_lock(&rxnet->peer_hash_lock);
while (!list_empty(collector)) {
peer = list_entry(collector->next,
struct rxrpc_peer, keepalive_link);
list_del_init(&peer->keepalive_link);
if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
continue;
use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
spin_unlock(&rxnet->peer_hash_lock);
if (use) {
keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
slot = keepalive_at - base;
_debug("%02x peer %u t=%d {%pISp}",
cursor, peer->debug_id, slot, &peer->srx.transport);
if (keepalive_at <= base ||
keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
rxrpc_send_keepalive(peer);
slot = RXRPC_KEEPALIVE_TIME;
}
/* A transmission to this peer occurred since last we
* examined it so put it into the appropriate future
* bucket.
*/
slot += cursor;
slot &= mask;
spin_lock(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
spin_unlock(&rxnet->peer_hash_lock);
rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
}
rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
spin_lock(&rxnet->peer_hash_lock);
}
spin_unlock(&rxnet->peer_hash_lock);
}
/*
* Perform keep-alive pings with VERSION packets to keep any NAT alive.
*/
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
struct rxrpc_net *rxnet =
container_of(work, struct rxrpc_net, peer_keepalive_work);
const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
time64_t base, now, delay;
u8 cursor, stop;
LIST_HEAD(collector);
now = ktime_get_seconds();
base = rxnet->peer_keepalive_base;
cursor = rxnet->peer_keepalive_cursor;
_enter("%lld,%u", base - now, cursor);
if (!rxnet->live)
return;
/* Remove to a temporary list all the peers that are currently lodged
* in expired buckets plus all new peers.
*
* Everything in the bucket at the cursor is processed this
* second; the bucket at cursor + 1 goes at now + 1s and so
* on...
*/
spin_lock(&rxnet->peer_hash_lock);
list_splice_init(&rxnet->peer_keepalive_new, &collector);
stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
while (base <= now && (s8)(cursor - stop) < 0) {
list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
&collector);
base++;
cursor++;
}
base = now;
spin_unlock(&rxnet->peer_hash_lock);
rxnet->peer_keepalive_base = base;
rxnet->peer_keepalive_cursor = cursor;
rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
ASSERT(list_empty(&collector));
/* Schedule the timer for the next occupied timeslot. */
cursor = rxnet->peer_keepalive_cursor;
stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
for (; (s8)(cursor - stop) < 0; cursor++) {
if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
break;
base++;
}
now = ktime_get_seconds();
delay = base - now;
if (delay < 1)
delay = 1;
delay *= HZ;
if (rxnet->live)
timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
_leave("");
}
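/*
 * Worked example of the bucket arithmetic used by the keepalive machinery
 * above (numbers illustrative; this is a sketch, not part of the upstream
 * logic). The ring has ARRAY_SIZE(rxnet->peer_keepalive) one-second buckets:
 *
 * keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
 * slot = keepalive_at - base;        // seconds until the ping falls due
 * slot = (slot + cursor) & mask;     // index of the future bucket
 *
 * A ping that is already due (keepalive_at <= base) or that falls outside the
 * window is sent immediately and the peer is rescheduled a full
 * RXRPC_KEEPALIVE_TIME ahead, modulo the ring.
 */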
| linux-master | net/rxrpc/peer_event.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* sysctls for configuring RxRPC operating parameters
*
* Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/sysctl.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
static struct ctl_table_header *rxrpc_sysctl_reg_table;
static const unsigned int four = 4;
static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = 255;
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
static const unsigned long max_500 = 500;
#endif
/*
* RxRPC operating parameters.
*
* See Documentation/networking/rxrpc.rst and the variable definitions for more
* information on the individual parameters.
*/
static struct ctl_table rxrpc_sysctl_table[] = {
/* Values measured in milliseconds but used in jiffies */
{
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_conn_expiry",
.data = &rxrpc_conn_idle_client_expiry,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_conn_fast_expiry",
.data = &rxrpc_conn_idle_client_fast_expiry,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
},
/* Values used in milliseconds */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
{
.procname = "inject_rx_delay",
.data = &rxrpc_inject_rx_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = (void *)SYSCTL_LONG_ZERO,
.extra2 = (void *)&max_500,
},
#endif
/* Non-time values */
{
.procname = "reap_client_conns",
.data = &rxrpc_reap_client_connections,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)SYSCTL_ONE,
.extra2 = (void *)&n_65535,
},
{
.procname = "max_backlog",
.data = &rxrpc_max_backlog,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)&four,
.extra2 = (void *)&max_backlog,
},
{
.procname = "rx_window_size",
.data = &rxrpc_rx_window_size,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)SYSCTL_ONE,
.extra2 = (void *)&n_max_acks,
},
{
.procname = "rx_mtu",
.data = &rxrpc_rx_mtu,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)SYSCTL_ONE,
.extra2 = (void *)&n_65535,
},
{
.procname = "rx_jumbo_max",
.data = &rxrpc_rx_jumbo_max,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)SYSCTL_ONE,
.extra2 = (void *)&four,
},
{ }
};
int __init rxrpc_sysctl_init(void)
{
rxrpc_sysctl_reg_table = register_net_sysctl(&init_net, "net/rxrpc",
rxrpc_sysctl_table);
if (!rxrpc_sysctl_reg_table)
return -ENOMEM;
return 0;
}
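/*
 * Usage sketch (hedged; assumes the standard procfs sysctl layout): the table
 * above registers under "net/rxrpc", so the tunables appear as files such as
 * /proc/sys/net/rxrpc/rx_window_size and /proc/sys/net/rxrpc/rx_mtu. Entries
 * handled by proc_doulongvec_ms_jiffies_minmax() are read and written in
 * milliseconds and converted to jiffies internally, e.g.
 *
 * echo 200 > /proc/sys/net/rxrpc/soft_ack_delay
 *
 * requests a 200ms soft-ACK delay, clamped to the one_jiffy..max_jiffies
 * bounds given above.
 */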
void rxrpc_sysctl_exit(void)
{
if (rxrpc_sysctl_reg_table)
unregister_net_sysctl_table(rxrpc_sysctl_reg_table);
}
| linux-master | net/rxrpc/sysctl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Local endpoint object management
*
* Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
static void rxrpc_local_rcu(struct rcu_head *);
/*
* Handle an ICMP/ICMP6 error turning up at the tunnel. Push it through the
* usual mechanism so that it gets parsed and presented through the UDP
* socket's error_report().
*/
static void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
if (ip_hdr(skb)->version == IPVERSION)
return ip_icmp_error(sk, skb, err, port, info, payload);
if (IS_ENABLED(CONFIG_AF_RXRPC_IPV6))
return ipv6_icmp_error(sk, skb, err, port, info, payload);
}
/*
* Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
* same or greater than.
*
* We explicitly don't compare the RxRPC service ID as we want to reject
* conflicting uses by differing services. Further, we don't want to share
* addresses with different options (IPv6), so we don't compare those bits
* either.
*/
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
const struct sockaddr_rxrpc *srx)
{
long diff;
diff = ((local->srx.transport_type - srx->transport_type) ?:
(local->srx.transport_len - srx->transport_len) ?:
(local->srx.transport.family - srx->transport.family));
if (diff != 0)
return diff;
switch (srx->transport.family) {
case AF_INET:
/* If the choice of UDP port is left up to the transport, then
* the endpoint record doesn't match.
*/
return ((u16 __force)local->srx.transport.sin.sin_port -
(u16 __force)srx->transport.sin.sin_port) ?:
memcmp(&local->srx.transport.sin.sin_addr,
&srx->transport.sin.sin_addr,
sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
/* If the choice of UDP6 port is left up to the transport, then
* the endpoint record doesn't match.
*/
return ((u16 __force)local->srx.transport.sin6.sin6_port -
(u16 __force)srx->transport.sin6.sin6_port) ?:
memcmp(&local->srx.transport.sin6.sin6_addr,
&srx->transport.sin6.sin6_addr,
sizeof(struct in6_addr));
#endif
default:
BUG();
}
}
static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
{
struct rxrpc_local *local =
container_of(timer, struct rxrpc_local, client_conn_reap_timer);
if (local->kill_all_client_conns &&
test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
rxrpc_wake_up_io_thread(local);
}
/*
* Allocate a new local endpoint.
*/
static struct rxrpc_local *rxrpc_alloc_local(struct net *net,
const struct sockaddr_rxrpc *srx)
{
struct rxrpc_local *local;
u32 tmp;
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
if (local) {
refcount_set(&local->ref, 1);
atomic_set(&local->active_users, 1);
local->net = net;
local->rxnet = rxrpc_net(net);
INIT_HLIST_NODE(&local->link);
init_completion(&local->io_thread_ready);
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
skb_queue_head_init(&local->rx_delay_queue);
#endif
skb_queue_head_init(&local->rx_queue);
INIT_LIST_HEAD(&local->conn_attend_q);
INIT_LIST_HEAD(&local->call_attend_q);
local->client_bundles = RB_ROOT;
spin_lock_init(&local->client_bundles_lock);
local->kill_all_client_conns = false;
INIT_LIST_HEAD(&local->idle_client_conns);
timer_setup(&local->client_conn_reap_timer,
rxrpc_client_conn_reap_timeout, 0);
spin_lock_init(&local->lock);
rwlock_init(&local->services_lock);
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
memcpy(&local->srx, srx, sizeof(*srx));
local->srx.srx_service = 0;
idr_init(&local->conn_ids);
get_random_bytes(&tmp, sizeof(tmp));
tmp &= 0x3fffffff;
if (tmp == 0)
tmp = 1;
idr_set_cursor(&local->conn_ids, tmp);
INIT_LIST_HEAD(&local->new_client_calls);
spin_lock_init(&local->client_call_lock);
trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1);
}
_leave(" = %p", local);
return local;
}
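/*
 * A note on the conn-ID cursor seeding above (an inference, not stated in the
 * original source): the random starting point is masked to 30 bits and forced
 * non-zero before being installed with idr_set_cursor(). Presumably this
 * keeps locally allocated client connection IDs within the space that the
 * on-wire CID can carry once the low-order channel-selector bits are factored
 * in, while the randomisation makes IDs harder to predict across restarts of
 * the endpoint.
 */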
/*
* create the local socket
* - must be called with rxrpc_local_mutex locked
*/
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct sockaddr_rxrpc *srx = &local->srx;
struct udp_port_cfg udp_conf = {0};
struct task_struct *io_thread;
struct sock *usk;
int ret;
_enter("%p{%d,%d}",
local, srx->transport_type, srx->transport.family);
udp_conf.family = srx->transport.family;
udp_conf.use_udp_checksums = true;
if (udp_conf.family == AF_INET) {
udp_conf.local_ip = srx->transport.sin.sin_addr;
udp_conf.local_udp_port = srx->transport.sin.sin_port;
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
} else {
udp_conf.local_ip6 = srx->transport.sin6.sin6_addr;
udp_conf.local_udp_port = srx->transport.sin6.sin6_port;
udp_conf.use_udp6_tx_checksums = true;
udp_conf.use_udp6_rx_checksums = true;
#endif
}
ret = udp_sock_create(net, &udp_conf, &local->socket);
if (ret < 0) {
_leave(" = %d [socket]", ret);
return ret;
}
tuncfg.encap_type = UDP_ENCAP_RXRPC;
tuncfg.encap_rcv = rxrpc_encap_rcv;
tuncfg.encap_err_rcv = rxrpc_encap_err_rcv;
tuncfg.sk_user_data = local;
setup_udp_tunnel_sock(net, local->socket, &tuncfg);
/* set the socket up */
usk = local->socket->sk;
usk->sk_error_report = rxrpc_error_report;
switch (srx->transport.family) {
case AF_INET6:
/* we want to receive ICMPv6 errors */
ip6_sock_set_recverr(usk);
/* Fall through and set IPv4 options too otherwise we don't get
* errors from IPv4 packets sent through the IPv6 socket.
*/
fallthrough;
case AF_INET:
/* we want to receive ICMP errors */
ip_sock_set_recverr(usk);
/* we want to set the don't fragment bit */
ip_sock_set_mtu_discover(usk, IP_PMTUDISC_DO);
/* We want receive timestamps. */
sock_enable_timestamps(usk);
break;
default:
BUG();
}
io_thread = kthread_run(rxrpc_io_thread, local,
"krxrpcio/%u", ntohs(udp_conf.local_udp_port));
if (IS_ERR(io_thread)) {
ret = PTR_ERR(io_thread);
goto error_sock;
}
wait_for_completion(&local->io_thread_ready);
local->io_thread = io_thread;
_leave(" = 0");
return 0;
error_sock:
kernel_sock_shutdown(local->socket, SHUT_RDWR);
local->socket->sk->sk_user_data = NULL;
sock_release(local->socket);
local->socket = NULL;
return ret;
}
/*
* Look up or create a new local endpoint using the specified local address.
*/
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
const struct sockaddr_rxrpc *srx)
{
struct rxrpc_local *local;
struct rxrpc_net *rxnet = rxrpc_net(net);
struct hlist_node *cursor;
long diff;
int ret;
_enter("{%d,%d,%pISp}",
srx->transport_type, srx->transport.family, &srx->transport);
mutex_lock(&rxnet->local_mutex);
hlist_for_each(cursor, &rxnet->local_endpoints) {
local = hlist_entry(cursor, struct rxrpc_local, link);
diff = rxrpc_local_cmp_key(local, srx);
if (diff != 0)
continue;
/* Services aren't allowed to share transport sockets, so
* reject that here. It is possible that the object is dying -
* but it may also still have the local transport address that
* we want bound.
*/
if (srx->srx_service) {
local = NULL;
goto addr_in_use;
}
/* Found a match. We want to replace a dying object.
* Attempting to bind the transport socket may still fail if
* we're attempting to use a local address that the dying
* object is still using.
*/
if (!rxrpc_use_local(local, rxrpc_local_use_lookup))
break;
goto found;
}
local = rxrpc_alloc_local(net, srx);
if (!local)
goto nomem;
ret = rxrpc_open_socket(local, net);
if (ret < 0)
goto sock_error;
if (cursor) {
hlist_replace_rcu(cursor, &local->link);
cursor->pprev = NULL;
} else {
hlist_add_head_rcu(&local->link, &rxnet->local_endpoints);
}
found:
mutex_unlock(&rxnet->local_mutex);
_leave(" = %p", local);
return local;
nomem:
ret = -ENOMEM;
sock_error:
mutex_unlock(&rxnet->local_mutex);
if (local)
call_rcu(&local->rcu, rxrpc_local_rcu);
_leave(" = %d", ret);
return ERR_PTR(ret);
addr_in_use:
mutex_unlock(&rxnet->local_mutex);
_leave(" = -EADDRINUSE");
return ERR_PTR(-EADDRINUSE);
}
/*
* Get a ref on a local endpoint.
*/
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local,
enum rxrpc_local_trace why)
{
int r, u;
u = atomic_read(&local->active_users);
__refcount_inc(&local->ref, &r);
trace_rxrpc_local(local->debug_id, why, r + 1, u);
return local;
}
/*
* Get a ref on a local endpoint unless its usage has already reached 0.
*/
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local,
enum rxrpc_local_trace why)
{
int r, u;
if (local && __refcount_inc_not_zero(&local->ref, &r)) {
u = atomic_read(&local->active_users);
trace_rxrpc_local(local->debug_id, why, r + 1, u);
return local;
}
return NULL;
}
/*
* Drop a ref on a local endpoint.
*/
void rxrpc_put_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
{
unsigned int debug_id;
bool dead;
int r, u;
if (local) {
debug_id = local->debug_id;
u = atomic_read(&local->active_users);
dead = __refcount_dec_and_test(&local->ref, &r);
trace_rxrpc_local(debug_id, why, r, u);
if (dead)
call_rcu(&local->rcu, rxrpc_local_rcu);
}
}
/*
* Start using a local endpoint.
*/
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local,
enum rxrpc_local_trace why)
{
local = rxrpc_get_local_maybe(local, rxrpc_local_get_for_use);
if (!local)
return NULL;
if (!__rxrpc_use_local(local, why)) {
rxrpc_put_local(local, rxrpc_local_put_for_use);
return NULL;
}
return local;
}
/*
* Cease using a local endpoint. Once the number of active users reaches 0, we
* start the closure of the transport in the I/O thread.
*/
void rxrpc_unuse_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
{
unsigned int debug_id;
int r, u;
if (local) {
debug_id = local->debug_id;
r = refcount_read(&local->ref);
u = atomic_dec_return(&local->active_users);
trace_rxrpc_local(debug_id, why, r, u);
if (u == 0)
kthread_stop(local->io_thread);
}
}
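/*
 * Summary of the two counters managed above (derived from the code in this
 * file): local->ref is the ordinary object refcount, and when it drops to
 * zero the endpoint is freed via call_rcu()/rxrpc_local_rcu(); whereas
 * local->active_users counts active users of the transport itself, and when
 * that reaches zero in rxrpc_unuse_local() the I/O thread is stopped, which
 * in turn allows rxrpc_destroy_local() to shut the socket down.
 */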
/*
* Destroy a local endpoint's socket and then hand the record to RCU to dispose
* of.
*
* Closing the socket cannot be done from bottom half context or RCU callback
* context because it might sleep.
*/
void rxrpc_destroy_local(struct rxrpc_local *local)
{
struct socket *socket = local->socket;
struct rxrpc_net *rxnet = local->rxnet;
_enter("%d", local->debug_id);
local->dead = true;
mutex_lock(&rxnet->local_mutex);
hlist_del_init_rcu(&local->link);
mutex_unlock(&rxnet->local_mutex);
rxrpc_clean_up_local_conns(local);
rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
ASSERT(!local->service);
if (socket) {
local->socket = NULL;
kernel_sock_shutdown(socket, SHUT_RDWR);
socket->sk->sk_user_data = NULL;
sock_release(socket);
}
/* At this point, there should be no more packets coming in to the
* local endpoint.
*/
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
rxrpc_purge_queue(&local->rx_delay_queue);
#endif
rxrpc_purge_queue(&local->rx_queue);
rxrpc_purge_client_connections(local);
}
/*
* Destroy a local endpoint after the RCU grace period expires.
*/
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);
rxrpc_see_local(local, rxrpc_local_free);
kfree(local);
}
/*
* Verify the local endpoint list is empty by this point.
*/
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
struct rxrpc_local *local;
_enter("");
flush_workqueue(rxrpc_workqueue);
if (!hlist_empty(&rxnet->local_endpoints)) {
mutex_lock(&rxnet->local_mutex);
hlist_for_each_entry(local, &rxnet->local_endpoints, link) {
pr_err("AF_RXRPC: Leaked local %p {%d}\n",
local, refcount_read(&local->ref));
}
mutex_unlock(&rxnet->local_mutex);
BUG();
}
}
| linux-master | net/rxrpc/local_object.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC Tx data buffering.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include "ar-internal.h"
static atomic_t rxrpc_txbuf_debug_ids;
atomic_t rxrpc_nr_txbuf;
/*
* Allocate and partially initialise a transmission buffer.
*/
struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
gfp_t gfp)
{
struct rxrpc_txbuf *txb;
txb = kmalloc(sizeof(*txb), gfp);
if (txb) {
INIT_LIST_HEAD(&txb->call_link);
INIT_LIST_HEAD(&txb->tx_link);
refcount_set(&txb->ref, 1);
txb->call_debug_id = call->debug_id;
txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
txb->space = sizeof(txb->data);
txb->len = 0;
txb->offset = 0;
txb->flags = 0;
txb->ack_why = 0;
txb->seq = call->tx_prepared + 1;
txb->wire.epoch = htonl(call->conn->proto.epoch);
txb->wire.cid = htonl(call->cid);
txb->wire.callNumber = htonl(call->call_id);
txb->wire.seq = htonl(txb->seq);
txb->wire.type = packet_type;
txb->wire.flags = call->conn->out_clientflag;
txb->wire.userStatus = 0;
txb->wire.securityIndex = call->security_ix;
txb->wire._rsvd = 0;
txb->wire.serviceId = htons(call->dest_srx.srx_service);
trace_rxrpc_txbuf(txb->debug_id,
txb->call_debug_id, txb->seq, 1,
packet_type == RXRPC_PACKET_TYPE_DATA ?
rxrpc_txbuf_alloc_data :
rxrpc_txbuf_alloc_ack);
atomic_inc(&rxrpc_nr_txbuf);
}
return txb;
}
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
{
int r;
__refcount_inc(&txb->ref, &r);
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r + 1, what);
}
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
{
int r = refcount_read(&txb->ref);
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r, what);
}
static void rxrpc_free_txbuf(struct rcu_head *rcu)
{
struct rxrpc_txbuf *txb = container_of(rcu, struct rxrpc_txbuf, rcu);
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 0,
rxrpc_txbuf_free);
kfree(txb);
atomic_dec(&rxrpc_nr_txbuf);
}
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
{
unsigned int debug_id, call_debug_id;
rxrpc_seq_t seq;
bool dead;
int r;
if (txb) {
debug_id = txb->debug_id;
call_debug_id = txb->call_debug_id;
seq = txb->seq;
dead = __refcount_dec_and_test(&txb->ref, &r);
trace_rxrpc_txbuf(debug_id, call_debug_id, seq, r - 1, what);
if (dead)
call_rcu(&txb->rcu, rxrpc_free_txbuf);
}
}
/*
* Shrink the transmit buffer.
*/
void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
{
struct rxrpc_txbuf *txb;
rxrpc_seq_t hard_ack = smp_load_acquire(&call->acks_hard_ack);
bool wake = false;
_enter("%x/%x/%x", call->tx_bottom, call->acks_hard_ack, call->tx_top);
while ((txb = list_first_entry_or_null(&call->tx_buffer,
struct rxrpc_txbuf, call_link))) {
hard_ack = smp_load_acquire(&call->acks_hard_ack);
if (before(hard_ack, txb->seq))
break;
if (txb->seq != call->tx_bottom + 1)
rxrpc_see_txbuf(txb, rxrpc_txbuf_see_out_of_step);
ASSERTCMP(txb->seq, ==, call->tx_bottom + 1);
smp_store_release(&call->tx_bottom, call->tx_bottom + 1);
list_del_rcu(&txb->call_link);
trace_rxrpc_txqueue(call, rxrpc_txqueue_dequeue);
rxrpc_put_txbuf(txb, rxrpc_txbuf_put_rotated);
if (after(call->acks_hard_ack, call->tx_bottom + 128))
wake = true;
}
if (wake)
wake_up(&call->waitq);
}
| linux-master | net/rxrpc/txbuf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* connection-level event handling
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
/*
* Set the completion state on an aborted connection.
*/
static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff *skb,
s32 abort_code, int err,
enum rxrpc_call_completion compl)
{
bool aborted = false;
if (conn->state != RXRPC_CONN_ABORTED) {
spin_lock(&conn->state_lock);
if (conn->state != RXRPC_CONN_ABORTED) {
conn->abort_code = abort_code;
conn->error = err;
conn->completion = compl;
/* Order the abort info before the state change. */
smp_store_release(&conn->state, RXRPC_CONN_ABORTED);
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events);
aborted = true;
}
spin_unlock(&conn->state_lock);
}
return aborted;
}
/*
* Mark a socket buffer to indicate that the connection it's on should be aborted.
*/
int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
s32 abort_code, int err, enum rxrpc_abort_reason why)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
if (rxrpc_set_conn_aborted(conn, skb, abort_code, err,
RXRPC_CALL_LOCALLY_ABORTED)) {
trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber,
sp->hdr.seq, abort_code, err);
rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort);
}
return -EPROTO;
}
/*
* Mark a connection as being remotely aborted.
*/
static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn,
struct sk_buff *skb)
{
return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
RXRPC_CALL_REMOTELY_ABORTED);
}
/*
* Retransmit terminal ACK or ABORT of the previous call.
*/
void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
struct sk_buff *skb,
unsigned int channel)
{
struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
struct rxrpc_channel *chan;
struct msghdr msg;
struct kvec iov[3];
struct {
struct rxrpc_wire_header whdr;
union {
__be32 abort_code;
struct rxrpc_ackpacket ack;
};
} __attribute__((packed)) pkt;
struct rxrpc_ackinfo ack_info;
size_t len;
int ret, ioc;
u32 serial, mtu, call_id, padding;
_enter("%d", conn->debug_id);
chan = &conn->channels[channel];
/* If the last call got moved on whilst we were waiting to run, just
* ignore this packet.
*/
call_id = chan->last_call;
if (skb && call_id != sp->hdr.callNumber)
return;
msg.msg_name = &conn->peer->srx.transport;
msg.msg_namelen = conn->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
iov[0].iov_base = &pkt;
iov[0].iov_len = sizeof(pkt.whdr);
iov[1].iov_base = &padding;
iov[1].iov_len = 3;
iov[2].iov_base = &ack_info;
iov[2].iov_len = sizeof(ack_info);
serial = atomic_inc_return(&conn->serial);
pkt.whdr.epoch = htonl(conn->proto.epoch);
pkt.whdr.cid = htonl(conn->proto.cid | channel);
pkt.whdr.callNumber = htonl(call_id);
pkt.whdr.serial = htonl(serial);
pkt.whdr.seq = 0;
pkt.whdr.type = chan->last_type;
pkt.whdr.flags = conn->out_clientflag;
pkt.whdr.userStatus = 0;
pkt.whdr.securityIndex = conn->security_ix;
pkt.whdr._rsvd = 0;
pkt.whdr.serviceId = htons(conn->service_id);
len = sizeof(pkt.whdr);
switch (chan->last_type) {
case RXRPC_PACKET_TYPE_ABORT:
pkt.abort_code = htonl(chan->last_abort);
iov[0].iov_len += sizeof(pkt.abort_code);
len += sizeof(pkt.abort_code);
ioc = 1;
break;
case RXRPC_PACKET_TYPE_ACK:
mtu = conn->peer->if_mtu;
mtu -= conn->peer->hdrsize;
pkt.ack.bufferSpace = 0;
pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
pkt.ack.firstPacket = htonl(chan->last_seq + 1);
pkt.ack.previousPacket = htonl(chan->last_seq);
pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
ack_info.rxMTU = htonl(rxrpc_rx_mtu);
ack_info.maxMTU = htonl(mtu);
ack_info.rwind = htonl(rxrpc_rx_window_size);
ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
pkt.whdr.flags |= RXRPC_SLOW_START_OK;
padding = 0;
iov[0].iov_len += sizeof(pkt.ack);
len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
ioc = 3;
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
ntohl(pkt.ack.firstPacket),
ntohl(pkt.ack.serial),
pkt.ack.reason, 0, rxrpc_rx_window_size);
break;
default:
return;
}
ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
conn->peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
rxrpc_tx_point_call_final_resend);
else
trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
rxrpc_tx_point_call_final_resend);
_leave("");
}
/*
* pass a connection-level abort onto all calls on that connection
*/
static void rxrpc_abort_calls(struct rxrpc_connection *conn)
{
struct rxrpc_call *call;
int i;
_enter("{%d},%x", conn->debug_id, conn->abort_code);
for (i = 0; i < RXRPC_MAXCALLS; i++) {
call = conn->channels[i].call;
if (call)
rxrpc_set_call_completion(call,
conn->completion,
conn->abort_code,
conn->error);
}
_leave("");
}
/*
* mark a call as being on a now-secured channel
* - must be called with BH's disabled.
*/
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
rxrpc_notify_socket(call);
}
}
/*
* connection-level Rx packet processor
*/
static int rxrpc_process_event(struct rxrpc_connection *conn,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
int ret;
if (conn->state == RXRPC_CONN_ABORTED)
return -ECONNABORTED;
_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_CHALLENGE:
return conn->security->respond_to_challenge(conn, skb);
case RXRPC_PACKET_TYPE_RESPONSE:
ret = conn->security->verify_response(conn, skb);
if (ret < 0)
return ret;
ret = conn->security->init_connection_security(
conn, conn->key->payload.data[0]);
if (ret < 0)
return ret;
spin_lock(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
conn->state = RXRPC_CONN_SERVICE;
spin_unlock(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE) {
/* Offload call state flipping to the I/O thread. As
* we've already received the packet, put it on the
* front of the queue.
*/
skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
skb_queue_head(&conn->local->rx_queue, skb);
rxrpc_wake_up_io_thread(conn->local);
}
return 0;
default:
WARN_ON_ONCE(1);
return -EPROTO;
}
}
/*
* set up security and issue a challenge
*/
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
if (conn->security->issue_challenge(conn) < 0)
rxrpc_abort_conn(conn, NULL, RX_CALL_DEAD, -ENOMEM,
rxrpc_abort_nomem);
}
/*
* Process delayed final ACKs that we haven't subsumed into a subsequent call.
*/
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn, bool force)
{
unsigned long j = jiffies, next_j;
unsigned int channel;
bool set;
again:
next_j = j + LONG_MAX;
set = false;
for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
struct rxrpc_channel *chan = &conn->channels[channel];
unsigned long ack_at;
if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
continue;
ack_at = chan->final_ack_at;
if (time_before(j, ack_at) && !force) {
if (time_before(ack_at, next_j)) {
next_j = ack_at;
set = true;
}
continue;
}
if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
&conn->flags))
rxrpc_conn_retransmit_call(conn, NULL, channel);
}
j = jiffies;
if (time_before_eq(next_j, j))
goto again;
if (set)
rxrpc_reduce_conn_timer(conn, next_j);
}
/*
* connection-level event processor
*/
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
struct sk_buff *skb;
int ret;
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
ret = rxrpc_process_event(conn, skb);
switch (ret) {
case -ENOMEM:
case -EAGAIN:
skb_queue_head(&conn->rx_queue, skb);
rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work);
break;
default:
rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
break;
}
}
}
void rxrpc_process_connection(struct work_struct *work)
{
struct rxrpc_connection *conn =
container_of(work, struct rxrpc_connection, processor);
rxrpc_see_connection(conn, rxrpc_conn_see_work);
if (__rxrpc_use_local(conn->local, rxrpc_local_use_conn_work)) {
rxrpc_do_process_connection(conn);
rxrpc_unuse_local(conn->local, rxrpc_local_unuse_conn_work);
}
}
/*
* post connection-level events to the connection
* - this includes challenges, responses, some aborts and call terminal packet
* retransmission.
*/
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
struct sk_buff *skb)
{
_enter("%p,%p", conn, skb);
rxrpc_get_skb(skb, rxrpc_skb_get_conn_work);
skb_queue_tail(&conn->rx_queue, skb);
rxrpc_queue_conn(conn, rxrpc_conn_queue_rx_work);
}
/*
* Input a connection-level packet.
*/
bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_BUSY:
/* Just ignore BUSY packets for now. */
return true;
case RXRPC_PACKET_TYPE_ABORT:
if (rxrpc_is_conn_aborted(conn))
return true;
rxrpc_input_conn_abort(conn, skb);
rxrpc_abort_calls(conn);
return true;
case RXRPC_PACKET_TYPE_CHALLENGE:
case RXRPC_PACKET_TYPE_RESPONSE:
if (rxrpc_is_conn_aborted(conn)) {
if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED)
rxrpc_send_conn_abort(conn);
return true;
}
rxrpc_post_packet_to_conn(conn, skb);
return true;
default:
WARN_ON_ONCE(1);
return true;
}
}
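/*
 * Dispatch summary for the function above: BUSY packets are dropped, an ABORT
 * flips the connection into the aborted state and fails all of its calls, and
 * CHALLENGE/RESPONSE security packets are queued to the connection work item,
 * or answered with our own abort if the connection was already locally
 * aborted.
 */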
/*
* Input a connection event.
*/
void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
{
unsigned int loop;
if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
rxrpc_abort_calls(conn);
switch (skb->mark) {
case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
if (conn->state != RXRPC_CONN_SERVICE)
break;
for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
rxrpc_call_is_secure(conn->channels[loop].call);
break;
}
/* Process delayed ACKs whose time has come. */
if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
rxrpc_process_delayed_final_acks(conn, false);
}
| linux-master | net/rxrpc/conn_event.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
* Propose a PING ACK be sent.
*/
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why)
{
unsigned long now = jiffies;
unsigned long ping_at = now + rxrpc_idle_ack_delay;
if (time_before(ping_at, call->ping_at)) {
WRITE_ONCE(call->ping_at, ping_at);
rxrpc_reduce_call_timer(call, ping_at, now,
rxrpc_timer_set_for_ping);
trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
}
}
/*
* Propose a DELAY ACK be sent in the future.
*/
void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
enum rxrpc_propose_ack_trace why)
{
unsigned long expiry = rxrpc_soft_ack_delay;
unsigned long now = jiffies, ack_at;
call->ackr_serial = serial;
if (rxrpc_soft_ack_delay < expiry)
expiry = rxrpc_soft_ack_delay;
if (call->peer->srtt_us != 0)
ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
else
ack_at = expiry;
ack_at += READ_ONCE(call->tx_backoff);
ack_at += now;
if (time_before(ack_at, call->delay_ack_at)) {
WRITE_ONCE(call->delay_ack_at, ack_at);
rxrpc_reduce_call_timer(call, ack_at, now,
rxrpc_timer_set_for_ack);
}
trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
}
/*
* Queue an ACK for immediate transmission.
*/
void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
{
struct rxrpc_txbuf *txb;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
return;
rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_ACK,
rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS);
if (!txb) {
kleave(" = -ENOMEM");
return;
}
txb->ack_why = why;
txb->wire.seq = 0;
txb->wire.type = RXRPC_PACKET_TYPE_ACK;
txb->wire.flags |= RXRPC_SLOW_START_OK;
txb->ack.bufferSpace = 0;
txb->ack.maxSkew = 0;
txb->ack.firstPacket = 0;
txb->ack.previousPacket = 0;
txb->ack.serial = htonl(serial);
txb->ack.reason = ack_reason;
txb->ack.nAcks = 0;
trace_rxrpc_send_ack(call, why, ack_reason, serial);
rxrpc_send_ack_packet(call, txb);
rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
}
/*
* Handle congestion being detected by the retransmit timeout.
*/
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}
/*
* Perform retransmission of NAK'd and unack'd packets.
*/
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
{
struct rxrpc_ackpacket *ack = NULL;
struct rxrpc_txbuf *txb;
unsigned long resend_at;
rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
ktime_t now, max_age, oldest, ack_ts;
bool unacked = false;
unsigned int i;
LIST_HEAD(retrans_queue);
_enter("{%d,%d}", call->acks_hard_ack, call->tx_top);
now = ktime_get_real();
max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
oldest = now;
if (list_empty(&call->tx_buffer))
goto no_resend;
if (list_empty(&call->tx_buffer))
goto no_further_resend;
trace_rxrpc_resend(call, ack_skb);
txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);
/* Scan the soft ACK table without dropping the lock and resend any
* explicitly NAK'd packets.
*/
if (ack_skb) {
ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
for (i = 0; i < ack->nAcks; i++) {
rxrpc_seq_t seq;
if (ack->acks[i] & 1)
continue;
seq = ntohl(ack->firstPacket) + i;
if (after(txb->seq, transmitted))
break;
if (after(txb->seq, seq))
continue; /* A new hard ACK probably came in */
list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
if (txb->seq == seq)
goto found_txb;
}
goto no_further_resend;
found_txb:
if (after(ntohl(txb->wire.serial), call->acks_highest_serial))
continue; /* Ack point not yet reached */
rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);
if (list_empty(&txb->tx_link)) {
list_add_tail(&txb->tx_link, &retrans_queue);
set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
}
trace_rxrpc_retransmit(call, txb->seq,
ktime_to_ns(ktime_sub(txb->last_sent,
max_age)));
if (list_is_last(&txb->call_link, &call->tx_buffer))
goto no_further_resend;
txb = list_next_entry(txb, call_link);
}
}
/* Fast-forward through the Tx queue to the point the peer says it has
* seen. Anything between the soft-ACK table and that point will get
* ACK'd or NACK'd in due course, so don't worry about it here; here we
* need to consider retransmitting anything beyond that point.
*
* Note that ACK for a packet can beat the update of tx_transmitted.
*/
if (after_eq(READ_ONCE(call->acks_prev_seq), READ_ONCE(call->tx_transmitted)))
goto no_further_resend;
list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
if (before_eq(txb->seq, READ_ONCE(call->acks_prev_seq)))
continue;
if (after(txb->seq, READ_ONCE(call->tx_transmitted)))
break; /* Not transmitted yet */
if (ack && ack->reason == RXRPC_ACK_PING_RESPONSE &&
before(ntohl(txb->wire.serial), ntohl(ack->serial)))
goto do_resend; /* Wasn't accounted for by a more recent ping. */
if (ktime_after(txb->last_sent, max_age)) {
if (ktime_before(txb->last_sent, oldest))
oldest = txb->last_sent;
continue;
}
do_resend:
unacked = true;
if (list_empty(&txb->tx_link)) {
list_add_tail(&txb->tx_link, &retrans_queue);
set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
}
}
no_further_resend:
no_resend:
resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
!list_empty(&retrans_queue));
WRITE_ONCE(call->resend_at, resend_at);
if (unacked)
rxrpc_congestion_timeout(call);
/* If there was nothing that needed retransmission then it's likely
* that an ACK got lost somewhere. Send a ping to find out instead of
* retransmitting data.
*/
if (list_empty(&retrans_queue)) {
rxrpc_reduce_call_timer(call, resend_at, jiffies,
rxrpc_timer_set_for_resend);
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
goto out;
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_lost_ack);
goto out;
}
/* Retransmit the queue */
while ((txb = list_first_entry_or_null(&retrans_queue,
struct rxrpc_txbuf, tx_link))) {
list_del_init(&txb->tx_link);
rxrpc_transmit_one(call, txb);
}
out:
_leave("");
}
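/*
 * In short, the resend pass above works in two sweeps: first, any packets the
 * peer explicitly soft-NAK'd are queued for retransmission; then anything
 * already transmitted beyond acks_prev_seq whose last transmission is older
 * than the RTO-derived cutoff (or which a PING response shows went
 * unaccounted for) is queued too. If neither sweep finds work, a PING ACK is
 * sent instead, on the theory that an ACK, not data, went missing.
 */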
/*
* Start transmitting the reply to a service. This cancels the need to ACK the
* request if we haven't yet done so.
*/
static void rxrpc_begin_service_reply(struct rxrpc_call *call)
{
unsigned long now = jiffies;
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
}
/*
* Close the transmission phase. After this point there is no more data to be
* transmitted in the call.
*/
static void rxrpc_close_tx_phase(struct rxrpc_call *call)
{
_debug("________awaiting reply/ACK__________");
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
break;
case RXRPC_CALL_SERVER_SEND_REPLY:
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_AWAIT_ACK);
break;
default:
break;
}
}
static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
{
unsigned int winsize = min_t(unsigned int, call->tx_winsize,
call->cong_cwnd + call->cong_extra);
rxrpc_seq_t window = call->acks_hard_ack, wtop = window + winsize;
rxrpc_seq_t tx_top = call->tx_top;
int space;
space = wtop - tx_top;
return space > 0;
}
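/*
 * Worked example for the window test above (numbers purely illustrative):
 * with acks_hard_ack = 100, tx_winsize = 16, cong_cwnd + cong_extra = 10 and
 * tx_top = 108, winsize becomes min(16, 10) = 10, wtop = 110 and
 * space = 110 - 108 = 2, so two more DATA packets may be decanted before the
 * transmitter must wait for further hard ACKs.
 */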
/*
* Decant some of the sendmsg-prepared queue into the transmission buffer.
*/
static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
{
struct rxrpc_txbuf *txb;
if (!test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
if (list_empty(&call->tx_sendmsg))
return;
rxrpc_expose_client_call(call);
}
while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
struct rxrpc_txbuf, call_link))) {
spin_lock(&call->tx_lock);
list_del(&txb->call_link);
spin_unlock(&call->tx_lock);
call->tx_top = txb->seq;
list_add_tail(&txb->call_link, &call->tx_buffer);
if (txb->wire.flags & RXRPC_LAST_PACKET)
rxrpc_close_tx_phase(call);
rxrpc_transmit_one(call, txb);
if (!rxrpc_tx_window_has_space(call))
break;
}
}
static void rxrpc_transmit_some_data(struct rxrpc_call *call)
{
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_SERVER_ACK_REQUEST:
if (list_empty(&call->tx_sendmsg))
return;
rxrpc_begin_service_reply(call);
fallthrough;
case RXRPC_CALL_SERVER_SEND_REPLY:
case RXRPC_CALL_CLIENT_SEND_REQUEST:
if (!rxrpc_tx_window_has_space(call))
return;
if (list_empty(&call->tx_sendmsg)) {
rxrpc_inc_stat(call->rxnet, stat_tx_data_underflow);
return;
}
rxrpc_decant_prepared_tx(call);
break;
default:
return;
}
}
/*
* Ping the other end to fill our RTT cache and to retrieve the rwind
* and MTU parameters.
*/
static void rxrpc_send_initial_ping(struct rxrpc_call *call)
{
if (call->peer->rtt_count < 3 ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
ktime_get_real()))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_params);
}
/*
* Handle retransmission and deferred ACK/abort generation.
*/
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
{
unsigned long now, next, t;
rxrpc_serial_t ackr_serial;
bool resend = false, expired = false;
s32 abort_code;
rxrpc_see_call(call, rxrpc_call_see_input);
//printk("\n--------------------\n");
_enter("{%d,%s,%lx}",
call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)],
call->events);
if (__rxrpc_call_is_complete(call))
goto out;
/* Handle abort request locklessly, vs rxrpc_propose_abort(). */
abort_code = smp_load_acquire(&call->send_abort);
if (abort_code) {
rxrpc_abort_call(call, 0, call->send_abort, call->send_abort_err,
call->send_abort_why);
goto out;
}
if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
goto out;
/* If we see our async-event poke, check for timeout trippage. */
now = jiffies;
t = READ_ONCE(call->expect_rx_by);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
expired = true;
}
t = READ_ONCE(call->expect_req_by);
if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
expired = true;
}
t = READ_ONCE(call->expect_term_by);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
expired = true;
}
t = READ_ONCE(call->delay_ack_at);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
ackr_serial = xchg(&call->ackr_serial, 0);
rxrpc_send_ACK(call, RXRPC_ACK_DELAY, ackr_serial,
rxrpc_propose_ack_ping_for_lost_ack);
}
t = READ_ONCE(call->ack_lost_at);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
}
t = READ_ONCE(call->keepalive_at);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_keepalive);
}
t = READ_ONCE(call->ping_at);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_keepalive);
}
t = READ_ONCE(call->resend_at);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
resend = true;
}
if (skb)
rxrpc_input_call_packet(call, skb);
rxrpc_transmit_some_data(call);
if (skb) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK)
rxrpc_congestion_degrade(call);
}
if (test_and_clear_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events))
rxrpc_send_initial_ping(call);
/* Process events */
if (expired) {
if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
(int)call->conn->hi_serial - (int)call->rx_serial > 0) {
trace_rxrpc_call_reset(call);
rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
rxrpc_abort_call_reset);
} else {
rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
rxrpc_abort_call_timeout);
}
goto out;
}
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_lost_ack);
if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
rxrpc_resend(call, NULL);
if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
rxrpc_propose_ack_rx_idle);
if (call->ackr_nr_unacked > 2) {
if (call->peer->rtt_count < 3)
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_rtt);
else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
ktime_get_real()))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_old_rtt);
else
rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
rxrpc_propose_ack_input_data);
}
/* Make sure the timer is restarted */
if (!__rxrpc_call_is_complete(call)) {
next = call->expect_rx_by;
#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
set(call->expect_req_by);
set(call->expect_term_by);
set(call->delay_ack_at);
set(call->ack_lost_at);
set(call->resend_at);
set(call->keepalive_at);
set(call->ping_at);
now = jiffies;
if (time_after_eq(now, next))
rxrpc_poke_call(call, rxrpc_call_poke_timer_now);
rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
}
out:
if (__rxrpc_call_is_complete(call)) {
del_timer_sync(&call->timer);
if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
rxrpc_disconnect_call(call);
if (call->security)
call->security->free_call_crypto(call);
}
if (call->acks_hard_ack != call->tx_bottom)
rxrpc_shrink_call_tx_buffer(call);
_leave("");
return true;
}
| linux-master | net/rxrpc/call_event.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC recvmsg() implementation
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
* Post a call for attention by the socket or kernel service. Further
* notifications are suppressed by putting recvmsg_link on a dummy queue.
*/
void rxrpc_notify_socket(struct rxrpc_call *call)
{
struct rxrpc_sock *rx;
struct sock *sk;
_enter("%d", call->debug_id);
if (!list_empty(&call->recvmsg_link))
return;
rcu_read_lock();
rx = rcu_dereference(call->socket);
sk = &rx->sk;
if (rx && sk->sk_state < RXRPC_CLOSE) {
if (call->notify_rx) {
spin_lock(&call->notify_lock);
call->notify_rx(sk, call, call->user_call_ID);
spin_unlock(&call->notify_lock);
} else {
spin_lock(&rx->recvmsg_lock);
if (list_empty(&call->recvmsg_link)) {
rxrpc_get_call(call, rxrpc_call_get_notify_socket);
list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
}
spin_unlock(&rx->recvmsg_lock);
if (!sock_flag(sk, SOCK_DEAD)) {
_debug("call %ps", sk->sk_data_ready);
sk->sk_data_ready(sk);
}
}
}
rcu_read_unlock();
_leave("");
}
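/*
 * Note the two delivery paths above: a kernel service that supplied a
 * notify_rx callback is called back directly under notify_lock, whereas a
 * userspace socket has the call queued on rx->recvmsg_q (taking a ref) and is
 * woken through sk_data_ready() so that a subsequent recvmsg() picks the call
 * up.
 */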
/*
* Pass a call terminating message to userspace.
*/
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
u32 tmp = 0;
int ret;
switch (call->completion) {
case RXRPC_CALL_SUCCEEDED:
ret = 0;
if (rxrpc_is_service_call(call))
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
break;
case RXRPC_CALL_REMOTELY_ABORTED:
tmp = call->abort_code;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
break;
case RXRPC_CALL_LOCALLY_ABORTED:
tmp = call->abort_code;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
break;
case RXRPC_CALL_NETWORK_ERROR:
tmp = -call->error;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
break;
case RXRPC_CALL_LOCAL_ERROR:
tmp = -call->error;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
break;
default:
pr_err("Invalid terminal call state %u\n", call->completion);
BUG();
break;
}
trace_rxrpc_recvdata(call, rxrpc_recvmsg_terminal,
call->ackr_window - 1,
call->rx_pkt_offset, call->rx_pkt_len, ret);
return ret;
}
/*
* Discard a packet we've used up and advance the Rx window by one.
*/
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
rxrpc_serial_t serial;
rxrpc_seq_t old_consumed = call->rx_consumed, tseq;
bool last;
int acked;
_enter("%d", call->debug_id);
skb = skb_dequeue(&call->recvmsg_queue);
rxrpc_see_skb(skb, rxrpc_skb_see_rotate);
sp = rxrpc_skb(skb);
tseq = sp->hdr.seq;
serial = sp->hdr.serial;
last = sp->hdr.flags & RXRPC_LAST_PACKET;
/* Barrier against rxrpc_input_data(). */
if (after(tseq, call->rx_consumed))
smp_store_release(&call->rx_consumed, tseq);
rxrpc_free_skb(skb, rxrpc_skb_put_rotate);
trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate,
serial, call->rx_consumed);
if (last)
set_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags);
/* Check to see if there's an ACK that needs sending. */
acked = atomic_add_return(call->rx_consumed - old_consumed,
&call->ackr_nr_consumed);
if (acked > 8 &&
!test_and_set_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
rxrpc_poke_call(call, rxrpc_call_poke_idle);
}
/*
* Decrypt and verify a DATA packet.
*/
static int rxrpc_verify_data(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
if (sp->flags & RXRPC_RX_VERIFIED)
return 0;
return call->security->verify_packet(call, skb);
}
/*
* Deliver messages to a call. This keeps processing packets until the buffer
* is filled and we find either more DATA (returns 0) or the end of the DATA
* (returns 1). If more packets are required, it returns -EAGAIN and if the
* call has failed it returns -EIO.
*/
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
struct msghdr *msg, struct iov_iter *iter,
size_t len, int flags, size_t *_offset)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
rxrpc_seq_t seq = 0;
size_t remain;
unsigned int rx_pkt_offset, rx_pkt_len;
int copy, ret = -EAGAIN, ret2;
rx_pkt_offset = call->rx_pkt_offset;
rx_pkt_len = call->rx_pkt_len;
if (rxrpc_call_has_failed(call)) {
seq = call->ackr_window - 1;
ret = -EIO;
goto done;
}
if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) {
seq = call->ackr_window - 1;
ret = 1;
goto done;
}
/* No one else can be removing stuff from the queue, so we shouldn't
* need the Rx lock to walk it.
*/
skb = skb_peek(&call->recvmsg_queue);
while (skb) {
rxrpc_see_skb(skb, rxrpc_skb_see_recvmsg);
sp = rxrpc_skb(skb);
seq = sp->hdr.seq;
if (!(flags & MSG_PEEK))
trace_rxrpc_receive(call, rxrpc_receive_front,
sp->hdr.serial, seq);
if (msg)
sock_recv_timestamp(msg, sock->sk, skb);
if (rx_pkt_offset == 0) {
ret2 = rxrpc_verify_data(call, skb);
trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq,
sp->offset, sp->len, ret2);
if (ret2 < 0) {
kdebug("verify = %d", ret2);
ret = ret2;
goto out;
}
rx_pkt_offset = sp->offset;
rx_pkt_len = sp->len;
} else {
trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq,
rx_pkt_offset, rx_pkt_len, 0);
}
/* We have to handle short, empty and used-up DATA packets. */
remain = len - *_offset;
copy = rx_pkt_len;
if (copy > remain)
copy = remain;
if (copy > 0) {
ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
copy);
if (ret2 < 0) {
ret = ret2;
goto out;
}
/* handle piecemeal consumption of data packets */
rx_pkt_offset += copy;
rx_pkt_len -= copy;
*_offset += copy;
}
if (rx_pkt_len > 0) {
trace_rxrpc_recvdata(call, rxrpc_recvmsg_full, seq,
rx_pkt_offset, rx_pkt_len, 0);
ASSERTCMP(*_offset, ==, len);
ret = 0;
break;
}
/* The whole packet has been transferred. */
if (sp->hdr.flags & RXRPC_LAST_PACKET)
ret = 1;
rx_pkt_offset = 0;
rx_pkt_len = 0;
skb = skb_peek_next(skb, &call->recvmsg_queue);
if (!(flags & MSG_PEEK))
rxrpc_rotate_rx_window(call);
}
out:
if (!(flags & MSG_PEEK)) {
call->rx_pkt_offset = rx_pkt_offset;
call->rx_pkt_len = rx_pkt_len;
}
done:
trace_rxrpc_recvdata(call, rxrpc_recvmsg_data_return, seq,
rx_pkt_offset, rx_pkt_len, ret);
if (ret == -EAGAIN)
set_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
return ret;
}
/*
* Receive a message from an RxRPC socket
* - we need to be careful about two or more threads calling recvmsg
* simultaneously
*/
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct rxrpc_call *call;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct list_head *l;
unsigned int call_debug_id = 0;
size_t copied = 0;
long timeo;
int ret;
DEFINE_WAIT(wait);
trace_rxrpc_recvmsg(0, rxrpc_recvmsg_enter, 0);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
try_again:
lock_sock(&rx->sk);
/* Return immediately if a client socket has no outstanding calls */
if (RB_EMPTY_ROOT(&rx->calls) &&
list_empty(&rx->recvmsg_q) &&
rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
return -EAGAIN;
}
if (list_empty(&rx->recvmsg_q)) {
ret = -EWOULDBLOCK;
if (timeo == 0) {
call = NULL;
goto error_no_call;
}
release_sock(&rx->sk);
/* Wait for something to happen */
prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
goto wait_error;
if (list_empty(&rx->recvmsg_q)) {
if (signal_pending(current))
goto wait_interrupted;
trace_rxrpc_recvmsg(0, rxrpc_recvmsg_wait, 0);
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(&rx->sk), &wait);
goto try_again;
}
/* Find the next call and dequeue it if we're not just peeking. If we
* do dequeue it, that comes with a ref that we will need to release.
* We also want to weed out calls that got requeued whilst we were
* shovelling data out.
*/
spin_lock(&rx->recvmsg_lock);
l = rx->recvmsg_q.next;
call = list_entry(l, struct rxrpc_call, recvmsg_link);
if (!rxrpc_call_is_complete(call) &&
skb_queue_empty(&call->recvmsg_queue)) {
list_del_init(&call->recvmsg_link);
spin_unlock(&rx->recvmsg_lock);
release_sock(&rx->sk);
trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_unqueue, 0);
rxrpc_put_call(call, rxrpc_call_put_recvmsg);
goto try_again;
}
if (!(flags & MSG_PEEK))
list_del_init(&call->recvmsg_link);
else
rxrpc_get_call(call, rxrpc_call_get_recvmsg);
spin_unlock(&rx->recvmsg_lock);
call_debug_id = call->debug_id;
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_dequeue, 0);
/* We're going to drop the socket lock, so we need to lock the call
* against interference by sendmsg.
*/
if (!mutex_trylock(&call->user_mutex)) {
ret = -EWOULDBLOCK;
if (flags & MSG_DONTWAIT)
goto error_requeue_call;
ret = -ERESTARTSYS;
if (mutex_lock_interruptible(&call->user_mutex) < 0)
goto error_requeue_call;
}
release_sock(&rx->sk);
if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
BUG();
if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
if (flags & MSG_CMSG_COMPAT) {
unsigned int id32 = call->user_call_ID;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
sizeof(unsigned int), &id32);
} else {
unsigned long idl = call->user_call_ID;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
sizeof(unsigned long), &idl);
}
if (ret < 0)
goto error_unlock_call;
}
if (msg->msg_name && call->peer) {
size_t len = sizeof(call->dest_srx);
memcpy(msg->msg_name, &call->dest_srx, len);
msg->msg_namelen = len;
}
ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
flags, &copied);
if (ret == -EAGAIN)
ret = 0;
if (ret == -EIO)
goto call_failed;
if (ret < 0)
goto error_unlock_call;
if (rxrpc_call_is_complete(call) &&
skb_queue_empty(&call->recvmsg_queue))
goto call_complete;
if (rxrpc_call_has_failed(call))
goto call_failed;
if (!skb_queue_empty(&call->recvmsg_queue))
rxrpc_notify_socket(call);
goto not_yet_complete;
call_failed:
rxrpc_purge_queue(&call->recvmsg_queue);
call_complete:
ret = rxrpc_recvmsg_term(call, msg);
if (ret < 0)
goto error_unlock_call;
if (!(flags & MSG_PEEK))
rxrpc_release_call(rx, call);
msg->msg_flags |= MSG_EOR;
ret = 1;
not_yet_complete:
if (ret == 0)
msg->msg_flags |= MSG_MORE;
else
msg->msg_flags &= ~MSG_MORE;
ret = copied;
error_unlock_call:
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put_recvmsg);
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret);
return ret;
error_requeue_call:
if (!(flags & MSG_PEEK)) {
spin_lock(&rx->recvmsg_lock);
list_add(&call->recvmsg_link, &rx->recvmsg_q);
spin_unlock(&rx->recvmsg_lock);
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
} else {
rxrpc_put_call(call, rxrpc_call_put_recvmsg);
}
error_no_call:
release_sock(&rx->sk);
error_trace:
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret);
return ret;
wait_interrupted:
ret = sock_intr_errno(timeo);
wait_error:
finish_wait(sk_sleep(&rx->sk), &wait);
call = NULL;
goto error_trace;
}
/**
* rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
* @sock: The socket that the call exists on
* @call: The call to send data through
* @iter: The buffer to receive into
* @_len: The amount of data we want to receive (decreased on return)
* @want_more: True if more data is expected to be read
* @_abort: Where the abort code is stored if -ECONNABORTED is returned
* @_service: Where to store the actual service ID (may be upgraded)
*
* Allow a kernel service to receive data and pick up information about the
* state of a call. Returns 0 if got what was asked for and there's more
* available, 1 if we got what was asked for and we're at the end of the data
* and -EAGAIN if we need more data.
*
* Note that we may return -EAGAIN to drain empty packets at the end of the
* data, even if we've already copied over the requested data.
*
* *_abort should also be initialised to 0.
*/
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
struct iov_iter *iter, size_t *_len,
bool want_more, u32 *_abort, u16 *_service)
{
size_t offset = 0;
int ret;
_enter("{%d},%zu,%d", call->debug_id, *_len, want_more);
mutex_lock(&call->user_mutex);
ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset);
*_len -= offset;
if (ret == -EIO)
goto call_failed;
if (ret < 0)
goto out;
/* We can only reach here with a partially full buffer if we have
* reached the end of the data. We must otherwise have a full buffer
* or have been given -EAGAIN.
*/
if (ret == 1) {
if (iov_iter_count(iter) > 0)
goto short_data;
if (!want_more)
goto read_phase_complete;
ret = 0;
goto out;
}
if (!want_more)
goto excess_data;
goto out;
read_phase_complete:
ret = 1;
out:
if (_service)
*_service = call->dest_srx.srx_service;
mutex_unlock(&call->user_mutex);
_leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
return ret;
short_data:
trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_short_data,
call->cid, call->call_id, call->rx_consumed,
0, -EBADMSG);
ret = -EBADMSG;
goto out;
excess_data:
trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_excess_data,
call->cid, call->call_id, call->rx_consumed,
0, -EMSGSIZE);
ret = -EMSGSIZE;
goto out;
call_failed:
*_abort = call->abort_code;
ret = call->error;
if (call->completion == RXRPC_CALL_SUCCEEDED) {
ret = 1;
if (iov_iter_count(iter) > 0)
ret = -ECONNRESET;
}
goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);
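/*
 * Illustrative sketch, not part of the upstream file: one way a kernel
 * service might pull a reply into a flat buffer with
 * rxrpc_kernel_recv_data().  It assumes @call was set up elsewhere (e.g. by
 * rxrpc_kernel_begin_call()) and that the caller waits for its notify_rx
 * callback before retrying on -EAGAIN.  The name example_recv_reply() is
 * hypothetical.
 */
static int __maybe_unused example_recv_reply(struct socket *sock,
					     struct rxrpc_call *call,
					     void *buf, size_t size)
{
	struct kvec iov = { .iov_base = buf, .iov_len = size };
	struct iov_iter iter;
	size_t len = size;
	u32 abort_code = 0;
	int ret;

	iov_iter_kvec(&iter, ITER_DEST, &iov, 1, size);

	/* 1: end of the reply; 0: buffer filled but more data remains;
	 * -EAGAIN: not enough data yet, wait and call again.
	 */
	ret = rxrpc_kernel_recv_data(sock, call, &iter, &len, false,
				     &abort_code, NULL);
	if (ret == -ECONNABORTED)
		pr_debug("call aborted remotely, code %u\n", abort_code);
	return ret;
}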
| linux-master | net/rxrpc/recvmsg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC virtual connection handler, common bits.
*
* Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"
/*
* Time till a connection expires after last use (in seconds).
*/
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
unsigned long reap_at);
void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
struct rxrpc_local *local = conn->local;
bool busy;
if (WARN_ON_ONCE(!local))
return;
spin_lock_bh(&local->lock);
busy = !list_empty(&conn->attend_link);
if (!busy) {
rxrpc_get_connection(conn, why);
list_add_tail(&conn->attend_link, &local->conn_attend_q);
}
spin_unlock_bh(&local->lock);
rxrpc_wake_up_io_thread(local);
}
static void rxrpc_connection_timer(struct timer_list *timer)
{
struct rxrpc_connection *conn =
container_of(timer, struct rxrpc_connection, timer);
rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
}
/*
* allocate a new connection
*/
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
gfp_t gfp)
{
struct rxrpc_connection *conn;
_enter("");
conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
if (conn) {
INIT_LIST_HEAD(&conn->cache_link);
timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
INIT_WORK(&conn->processor, rxrpc_process_connection);
INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
mutex_init(&conn->security_lock);
skb_queue_head_init(&conn->rx_queue);
conn->rxnet = rxnet;
conn->security = &rxrpc_no_security;
spin_lock_init(&conn->state_lock);
conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
conn->idle_timestamp = jiffies;
}
_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
return conn;
}
/*
* Look up a connection in the cache by protocol parameters.
*
* If successful, a pointer to the connection is returned, but no ref is taken.
* NULL is returned if there is no match.
*
* The caller must be holding the RCU read lock.
*/
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx,
struct sk_buff *skb)
{
struct rxrpc_connection *conn;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_peer *peer;
_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
/* Look up client connections by connection ID alone as their
* IDs are unique for this machine.
*/
conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
if (!conn || refcount_read(&conn->ref) == 0) {
_debug("no conn");
goto not_found;
}
if (conn->proto.epoch != sp->hdr.epoch ||
conn->local != local)
goto not_found;
peer = conn->peer;
switch (srx->transport.family) {
case AF_INET:
if (peer->srx.transport.sin.sin_port !=
srx->transport.sin.sin_port ||
peer->srx.transport.sin.sin_addr.s_addr !=
srx->transport.sin.sin_addr.s_addr)
goto not_found;
break;
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
if (peer->srx.transport.sin6.sin6_port !=
srx->transport.sin6.sin6_port ||
memcmp(&peer->srx.transport.sin6.sin6_addr,
&srx->transport.sin6.sin6_addr,
sizeof(struct in6_addr)) != 0)
goto not_found;
break;
#endif
default:
BUG();
}
_leave(" = %p", conn);
return conn;
not_found:
_leave(" = NULL");
return NULL;
}
/*
* Disconnect a call and clear any channel it occupies when that call
* terminates. The caller must hold the channel_lock and must release the
* call's ref on the connection.
*/
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
struct rxrpc_call *call)
{
struct rxrpc_channel *chan =
&conn->channels[call->cid & RXRPC_CHANNELMASK];
_enter("%d,%x", conn->debug_id, call->cid);
if (chan->call == call) {
/* Save the result of the call so that we can repeat it if necessary
* through the channel, whilst disposing of the actual call record.
*/
trace_rxrpc_disconnect_call(call);
switch (call->completion) {
case RXRPC_CALL_SUCCEEDED:
chan->last_seq = call->rx_highest_seq;
chan->last_type = RXRPC_PACKET_TYPE_ACK;
break;
case RXRPC_CALL_LOCALLY_ABORTED:
chan->last_abort = call->abort_code;
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
break;
default:
chan->last_abort = RX_CALL_DEAD;
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
break;
}
chan->last_call = chan->call_id;
chan->call_id = chan->call_counter;
chan->call = NULL;
}
_leave("");
}
/*
* Disconnect a call and clear any channel it occupies when that call
* terminates.
*/
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
struct rxrpc_connection *conn = call->conn;
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
rxrpc_see_call(call, rxrpc_call_see_disconnected);
call->peer->cong_ssthresh = call->cong_ssthresh;
if (!hlist_unhashed(&call->error_link)) {
spin_lock(&call->peer->lock);
hlist_del_init(&call->error_link);
spin_unlock(&call->peer->lock);
}
if (rxrpc_is_client_call(call)) {
rxrpc_disconnect_client_call(call->bundle, call);
} else {
__rxrpc_disconnect_call(conn, call);
conn->idle_timestamp = jiffies;
if (atomic_dec_and_test(&conn->active))
rxrpc_set_service_reap_timer(conn->rxnet,
jiffies + rxrpc_connection_expiry);
}
rxrpc_put_call(call, rxrpc_call_put_io_thread);
}
/*
* Queue a connection's work processor, getting a ref to pass to the work
* queue.
*/
void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
if (atomic_read(&conn->active) >= 0 &&
rxrpc_queue_work(&conn->processor))
rxrpc_see_connection(conn, why);
}
/*
* Note the re-emergence of a connection.
*/
void rxrpc_see_connection(struct rxrpc_connection *conn,
enum rxrpc_conn_trace why)
{
if (conn) {
int r = refcount_read(&conn->ref);
trace_rxrpc_conn(conn->debug_id, r, why);
}
}
/*
* Get a ref on a connection.
*/
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
enum rxrpc_conn_trace why)
{
int r;
__refcount_inc(&conn->ref, &r);
trace_rxrpc_conn(conn->debug_id, r + 1, why);
return conn;
}
/*
* Try to get a ref on a connection.
*/
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
enum rxrpc_conn_trace why)
{
int r;
if (conn) {
if (__refcount_inc_not_zero(&conn->ref, &r))
trace_rxrpc_conn(conn->debug_id, r + 1, why);
else
conn = NULL;
}
return conn;
}
/*
* Set the service connection reap timer.
*/
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
unsigned long reap_at)
{
if (rxnet->live)
timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}
/*
* destroy a virtual connection
*/
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
struct rxrpc_connection *conn =
container_of(rcu, struct rxrpc_connection, rcu);
struct rxrpc_net *rxnet = conn->rxnet;
_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));
trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
rxrpc_conn_free);
kfree(conn);
if (atomic_dec_and_test(&rxnet->nr_conns))
wake_up_var(&rxnet->nr_conns);
}
/*
* Clean up a dead connection.
*/
static void rxrpc_clean_up_connection(struct work_struct *work)
{
struct rxrpc_connection *conn =
container_of(work, struct rxrpc_connection, destructor);
struct rxrpc_net *rxnet = conn->rxnet;
ASSERT(!conn->channels[0].call &&
!conn->channels[1].call &&
!conn->channels[2].call &&
!conn->channels[3].call);
ASSERT(list_empty(&conn->cache_link));
del_timer_sync(&conn->timer);
cancel_work_sync(&conn->processor); /* Processing may restart the timer */
del_timer_sync(&conn->timer);
write_lock(&rxnet->conn_lock);
list_del_init(&conn->proc_link);
write_unlock(&rxnet->conn_lock);
rxrpc_purge_queue(&conn->rx_queue);
rxrpc_kill_client_conn(conn);
conn->security->clear(conn);
key_put(conn->key);
rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);
/* Drain the Rx queue. Note that even though we've unpublished, an
* incoming packet could still be being added to our Rx queue, so we
* will need to drain it again in the RCU cleanup handler.
*/
rxrpc_purge_queue(&conn->rx_queue);
call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
/*
* Drop a ref on a connection.
*/
void rxrpc_put_connection(struct rxrpc_connection *conn,
enum rxrpc_conn_trace why)
{
unsigned int debug_id;
bool dead;
int r;
if (!conn)
return;
debug_id = conn->debug_id;
dead = __refcount_dec_and_test(&conn->ref, &r);
trace_rxrpc_conn(debug_id, r - 1, why);
if (dead) {
del_timer(&conn->timer);
cancel_work(&conn->processor);
if (in_softirq() || work_busy(&conn->processor) ||
timer_pending(&conn->timer))
/* Can't use the rxrpc workqueue as we need to cancel/flush
* something that may be running/waiting there.
*/
schedule_work(&conn->destructor);
else
rxrpc_clean_up_connection(&conn->destructor);
}
}
/*
* reap dead service connections
*/
void rxrpc_service_connection_reaper(struct work_struct *work)
{
struct rxrpc_connection *conn, *_p;
struct rxrpc_net *rxnet =
container_of(work, struct rxrpc_net, service_conn_reaper);
unsigned long expire_at, earliest, idle_timestamp, now;
int active;
LIST_HEAD(graveyard);
_enter("");
now = jiffies;
earliest = now + MAX_JIFFY_OFFSET;
write_lock(&rxnet->conn_lock);
list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
ASSERTCMP(atomic_read(&conn->active), >=, 0);
if (likely(atomic_read(&conn->active) > 0))
continue;
if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
continue;
if (rxnet->live && !conn->local->dead) {
idle_timestamp = READ_ONCE(conn->idle_timestamp);
expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
if (conn->local->service_closed)
expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
_debug("reap CONN %d { a=%d,t=%ld }",
conn->debug_id, atomic_read(&conn->active),
(long)expire_at - (long)now);
if (time_before(now, expire_at)) {
if (time_before(expire_at, earliest))
earliest = expire_at;
continue;
}
}
/* The activity count sits at 0 whilst the conn is unused on
* the list; we reduce that to -1 to make the conn unavailable.
*/
active = 0;
if (!atomic_try_cmpxchg(&conn->active, &active, -1))
continue;
rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);
if (rxrpc_conn_is_client(conn))
BUG();
else
rxrpc_unpublish_service_conn(conn);
list_move_tail(&conn->link, &graveyard);
}
write_unlock(&rxnet->conn_lock);
if (earliest != now + MAX_JIFFY_OFFSET) {
_debug("reschedule reaper %ld", (long)earliest - (long)now);
ASSERT(time_after(earliest, now));
rxrpc_set_service_reap_timer(rxnet, earliest);
}
while (!list_empty(&graveyard)) {
conn = list_entry(graveyard.next, struct rxrpc_connection,
link);
list_del_init(&conn->link);
ASSERTCMP(atomic_read(&conn->active), ==, -1);
rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
}
_leave("");
}
/*
* preemptively destroy all the service connection records rather than
* waiting for them to time out
*/
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
struct rxrpc_connection *conn, *_p;
bool leak = false;
_enter("");
atomic_dec(&rxnet->nr_conns);
del_timer_sync(&rxnet->service_conn_reap_timer);
rxrpc_queue_work(&rxnet->service_conn_reaper);
flush_workqueue(rxrpc_workqueue);
write_lock(&rxnet->conn_lock);
list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
conn, refcount_read(&conn->ref));
leak = true;
}
write_unlock(&rxnet->conn_lock);
BUG_ON(leak);
ASSERT(list_empty(&rxnet->conn_proc_list));
/* We need to wait for the connections to be destroyed by RCU as they
* pin things that we still need to get rid of.
*/
wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
_leave("");
}
| linux-master | net/rxrpc/conn_object.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Socket buffer accounting
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
#define select_skb_count(skb) (&rxrpc_n_rx_skbs)
/*
* Note the allocation or reception of a socket buffer.
*/
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
int n = atomic_inc_return(select_skb_count(skb));
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
}
/*
* Note the re-emergence of a socket buffer from a queue or buffer.
*/
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
if (skb) {
int n = atomic_read(select_skb_count(skb));
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
}
}
/*
* Note the addition of a ref on a socket buffer.
*/
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
int n = atomic_inc_return(select_skb_count(skb));
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
skb_get(skb);
}
/*
* Note the dropping of a ref on a socket buffer by the core.
*/
void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
int n = atomic_inc_return(&rxrpc_n_rx_skbs);
trace_rxrpc_skb(skb, 0, n, why);
}
/*
* Note the destruction of a socket buffer.
*/
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
if (skb) {
int n = atomic_dec_return(select_skb_count(skb));
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
consume_skb(skb);
}
}
/*
* Clear a queue of socket buffers.
*/
void rxrpc_purge_queue(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = skb_dequeue((list))) != NULL) {
int n = atomic_dec_return(select_skb_count(skb));
trace_rxrpc_skb(skb, refcount_read(&skb->users), n,
rxrpc_skb_put_purge);
consume_skb(skb);
}
}
| linux-master | net/rxrpc/skbuff.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC remote transport endpoint record management
*
* Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include "ar-internal.h"
/*
* Hash a peer key.
*/
static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
const struct sockaddr_rxrpc *srx)
{
const u16 *p;
unsigned int i, size;
unsigned long hash_key;
_enter("");
hash_key = (unsigned long)local / __alignof__(*local);
hash_key += srx->transport_type;
hash_key += srx->transport_len;
hash_key += srx->transport.family;
switch (srx->transport.family) {
case AF_INET:
hash_key += (u16 __force)srx->transport.sin.sin_port;
size = sizeof(srx->transport.sin.sin_addr);
p = (u16 *)&srx->transport.sin.sin_addr;
break;
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
hash_key += (u16 __force)srx->transport.sin.sin_port;
size = sizeof(srx->transport.sin6.sin6_addr);
p = (u16 *)&srx->transport.sin6.sin6_addr;
break;
#endif
default:
WARN(1, "AF_RXRPC: Unsupported transport address family\n");
return 0;
}
/* Step through the peer address in 16-bit portions for speed */
for (i = 0; i < size; i += sizeof(*p), p++)
hash_key += *p;
_leave(" 0x%lx", hash_key);
return hash_key;
}
/*
* Compare a peer to a key. Return -ve, 0 or +ve to indicate less than, same
* or greater than.
*
* Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
* buckets and mid-bucket insertion, so we don't make full use of this
* information at this point.
*/
static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
struct rxrpc_local *local,
const struct sockaddr_rxrpc *srx,
unsigned long hash_key)
{
long diff;
diff = ((peer->hash_key - hash_key) ?:
((unsigned long)peer->local - (unsigned long)local) ?:
(peer->srx.transport_type - srx->transport_type) ?:
(peer->srx.transport_len - srx->transport_len) ?:
(peer->srx.transport.family - srx->transport.family));
if (diff != 0)
return diff;
switch (srx->transport.family) {
case AF_INET:
return ((u16 __force)peer->srx.transport.sin.sin_port -
(u16 __force)srx->transport.sin.sin_port) ?:
memcmp(&peer->srx.transport.sin.sin_addr,
&srx->transport.sin.sin_addr,
sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
return ((u16 __force)peer->srx.transport.sin6.sin6_port -
(u16 __force)srx->transport.sin6.sin6_port) ?:
memcmp(&peer->srx.transport.sin6.sin6_addr,
&srx->transport.sin6.sin6_addr,
sizeof(struct in6_addr));
#endif
default:
BUG();
}
}
/*
* Look up a remote transport endpoint for the specified address using RCU.
*/
static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
struct rxrpc_local *local,
const struct sockaddr_rxrpc *srx,
unsigned long hash_key)
{
struct rxrpc_peer *peer;
struct rxrpc_net *rxnet = local->rxnet;
hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
refcount_read(&peer->ref) > 0)
return peer;
}
return NULL;
}
/*
* Look up a remote transport endpoint for the specified address using RCU.
*/
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
const struct sockaddr_rxrpc *srx)
{
struct rxrpc_peer *peer;
unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer)
_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
return peer;
}
/*
* assess the MTU size for the network interface through which this peer is
* reached
*/
static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
struct rxrpc_peer *peer)
{
struct net *net = local->net;
struct dst_entry *dst;
struct rtable *rt;
struct flowi fl;
struct flowi4 *fl4 = &fl.u.ip4;
#ifdef CONFIG_AF_RXRPC_IPV6
struct flowi6 *fl6 = &fl.u.ip6;
#endif
peer->if_mtu = 1500;
memset(&fl, 0, sizeof(fl));
switch (peer->srx.transport.family) {
case AF_INET:
rt = ip_route_output_ports(
net, fl4, NULL,
peer->srx.transport.sin.sin_addr.s_addr, 0,
htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
if (IS_ERR(rt)) {
_leave(" [route err %ld]", PTR_ERR(rt));
return;
}
dst = &rt->dst;
break;
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
fl6->flowi6_iif = LOOPBACK_IFINDEX;
fl6->flowi6_scope = RT_SCOPE_UNIVERSE;
fl6->flowi6_proto = IPPROTO_UDP;
memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
sizeof(struct in6_addr));
fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000);
dst = ip6_route_output(net, NULL, fl6);
if (dst->error) {
_leave(" [route err %d]", dst->error);
return;
}
break;
#endif
default:
BUG();
}
peer->if_mtu = dst_mtu(dst);
dst_release(dst);
_leave(" [if_mtu %u]", peer->if_mtu);
}
/*
* Allocate a peer.
*/
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
enum rxrpc_peer_trace why)
{
struct rxrpc_peer *peer;
_enter("");
peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
if (peer) {
refcount_set(&peer->ref, 1);
peer->local = rxrpc_get_local(local, rxrpc_local_get_peer);
INIT_HLIST_HEAD(&peer->error_targets);
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
spin_lock_init(&peer->rtt_input_lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
rxrpc_peer_init_rtt(peer);
peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
trace_rxrpc_peer(peer->debug_id, 1, why);
}
_leave(" = %p", peer);
return peer;
}
/*
* Initialise peer record.
*/
static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
unsigned long hash_key)
{
peer->hash_key = hash_key;
rxrpc_assess_MTU_size(local, peer);
peer->mtu = peer->if_mtu;
peer->rtt_last_req = ktime_get_real();
switch (peer->srx.transport.family) {
case AF_INET:
peer->hdrsize = sizeof(struct iphdr);
break;
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
peer->hdrsize = sizeof(struct ipv6hdr);
break;
#endif
default:
BUG();
}
switch (peer->srx.transport_type) {
case SOCK_DGRAM:
peer->hdrsize += sizeof(struct udphdr);
break;
default:
BUG();
}
peer->hdrsize += sizeof(struct rxrpc_wire_header);
peer->maxdata = peer->mtu - peer->hdrsize;
}
/*
* Set up a new peer.
*/
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx,
unsigned long hash_key,
gfp_t gfp)
{
struct rxrpc_peer *peer;
_enter("");
peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
if (peer) {
memcpy(&peer->srx, srx, sizeof(*srx));
rxrpc_init_peer(local, peer, hash_key);
}
_leave(" = %p", peer);
return peer;
}
static void rxrpc_free_peer(struct rxrpc_peer *peer)
{
trace_rxrpc_peer(peer->debug_id, 0, rxrpc_peer_free);
rxrpc_put_local(peer->local, rxrpc_local_put_peer);
kfree_rcu(peer, rcu);
}
/*
* Set up a new incoming peer. There shouldn't be any other matching peers
* since we've already done a search in the list from the non-reentrant context
* (the data_ready handler) that is the only place we can add new peers.
*/
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
{
struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;
hash_key = rxrpc_peer_hash_key(local, &peer->srx);
rxrpc_init_peer(local, peer, hash_key);
spin_lock(&rxnet->peer_hash_lock);
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
spin_unlock(&rxnet->peer_hash_lock);
}
/*
* obtain a remote transport endpoint for the specified address
*/
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp)
{
struct rxrpc_peer *peer, *candidate;
struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
_enter("{%pISp}", &srx->transport);
/* search the peer list first */
rcu_read_lock();
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
peer = NULL;
rcu_read_unlock();
if (!peer) {
/* The peer is not yet present in hash - create a candidate
* for a new record and then redo the search.
*/
candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
if (!candidate) {
_leave(" = NULL [nomem]");
return NULL;
}
spin_lock(&rxnet->peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
peer = NULL;
if (!peer) {
hash_add_rcu(rxnet->peer_hash,
&candidate->hash_link, hash_key);
list_add_tail(&candidate->keepalive_link,
&rxnet->peer_keepalive_new);
}
spin_unlock(&rxnet->peer_hash_lock);
if (peer)
rxrpc_free_peer(candidate);
else
peer = candidate;
}
_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
return peer;
}
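/*
 * Pattern note (derived from the code above): the peer table is searched
 * locklessly under RCU first; only on a miss is a candidate peer allocated,
 * and the search is then repeated under peer_hash_lock so that a racing
 * insertion wins and the losing candidate is freed.
 */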
/*
* Get a ref on a peer record.
*/
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
{
int r;
__refcount_inc(&peer->ref, &r);
trace_rxrpc_peer(peer->debug_id, r + 1, why);
return peer;
}
/*
* Get a ref on a peer record unless its usage has already reached 0.
*/
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer,
enum rxrpc_peer_trace why)
{
int r;
if (peer) {
if (__refcount_inc_not_zero(&peer->ref, &r))
trace_rxrpc_peer(peer->debug_id, r + 1, why);
else
peer = NULL;
}
return peer;
}
/*
* Discard a peer record.
*/
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
struct rxrpc_net *rxnet = peer->local->rxnet;
ASSERT(hlist_empty(&peer->error_targets));
spin_lock(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
spin_unlock(&rxnet->peer_hash_lock);
rxrpc_free_peer(peer);
}
/*
* Drop a ref on a peer record.
*/
void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
{
unsigned int debug_id;
bool dead;
int r;
if (peer) {
debug_id = peer->debug_id;
dead = __refcount_dec_and_test(&peer->ref, &r);
trace_rxrpc_peer(debug_id, r - 1, why);
if (dead)
__rxrpc_put_peer(peer);
}
}
/*
* Make sure all peer records have been discarded.
*/
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
{
struct rxrpc_peer *peer;
int i;
for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) {
if (hlist_empty(&rxnet->peer_hash[i]))
continue;
hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
pr_err("Leaked peer %u {%u} %pISp\n",
peer->debug_id,
refcount_read(&peer->ref),
&peer->srx.transport);
}
}
}
/**
* rxrpc_kernel_get_peer - Get the peer address of a call
* @sock: The socket on which the call is in progress.
* @call: The call to query
* @_srx: Where to place the result
*
* Get the address of the remote peer in a call.
*/
void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
struct sockaddr_rxrpc *_srx)
{
*_srx = call->peer->srx;
}
EXPORT_SYMBOL(rxrpc_kernel_get_peer);
/**
* rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
* @sock: The socket on which the call is in progress.
* @call: The call to query
* @_srtt: Where to store the SRTT value.
*
 * Get the call's peer smoothed RTT in microseconds.
*/
bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
u32 *_srtt)
{
struct rxrpc_peer *peer = call->peer;
if (peer->rtt_count == 0) {
*_srtt = 1000000; /* 1S */
return false;
}
*_srtt = call->peer->srtt_us >> 3;
return true;
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
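/*
 * Illustrative sketch, not part of the upstream file: combining the two
 * exported helpers above to log a call's peer address and smoothed RTT,
 * assuming @sock and @call are valid.  The name example_log_peer_rtt() is
 * hypothetical.
 */
static void __maybe_unused example_log_peer_rtt(struct socket *sock,
						struct rxrpc_call *call)
{
	struct sockaddr_rxrpc srx;
	u32 srtt_us;

	rxrpc_kernel_get_peer(sock, call, &srx);

	/* Fall back to the pessimistic 1s figure used above when no RTT
	 * samples exist yet.
	 */
	if (!rxrpc_kernel_get_srtt(sock, call, &srtt_us))
		srtt_us = 1000000;

	pr_debug("peer %pISp srtt=%uus\n", &srx.transport, srtt_us);
}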
| linux-master | net/rxrpc/peer_object.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Processing of received RxRPC packets
*
* Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ar-internal.h"
static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
enum rxrpc_abort_reason why)
{
rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why);
}
/*
* Do TCP-style congestion management [RFC 5681].
*/
static void rxrpc_congestion_management(struct rxrpc_call *call,
struct sk_buff *skb,
struct rxrpc_ack_summary *summary,
rxrpc_serial_t acked_serial)
{
enum rxrpc_congest_change change = rxrpc_cong_no_change;
unsigned int cumulative_acks = call->cong_cumul_acks;
unsigned int cwnd = call->cong_cwnd;
bool resend = false;
summary->flight_size =
(call->tx_top - call->acks_hard_ack) - summary->nr_acks;
if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
summary->retrans_timeo = true;
call->cong_ssthresh = max_t(unsigned int,
summary->flight_size / 2, 2);
cwnd = 1;
if (cwnd >= call->cong_ssthresh &&
call->cong_mode == RXRPC_CALL_SLOW_START) {
call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
call->cong_tstamp = skb->tstamp;
cumulative_acks = 0;
}
}
cumulative_acks += summary->nr_new_acks;
cumulative_acks += summary->nr_rot_new_acks;
if (cumulative_acks > 255)
cumulative_acks = 255;
summary->mode = call->cong_mode;
summary->cwnd = call->cong_cwnd;
summary->ssthresh = call->cong_ssthresh;
summary->cumulative_acks = cumulative_acks;
summary->dup_acks = call->cong_dup_acks;
switch (call->cong_mode) {
case RXRPC_CALL_SLOW_START:
if (summary->saw_nacks)
goto packet_loss_detected;
if (summary->cumulative_acks > 0)
cwnd += 1;
if (cwnd >= call->cong_ssthresh) {
call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
call->cong_tstamp = skb->tstamp;
}
goto out;
case RXRPC_CALL_CONGEST_AVOIDANCE:
if (summary->saw_nacks)
goto packet_loss_detected;
/* We analyse the number of packets that get ACK'd per RTT
* period and increase the window if we managed to fill it.
*/
if (call->peer->rtt_count == 0)
goto out;
if (ktime_before(skb->tstamp,
ktime_add_us(call->cong_tstamp,
call->peer->srtt_us >> 3)))
goto out_no_clear_ca;
change = rxrpc_cong_rtt_window_end;
call->cong_tstamp = skb->tstamp;
if (cumulative_acks >= cwnd)
cwnd++;
goto out;
case RXRPC_CALL_PACKET_LOSS:
if (!summary->saw_nacks)
goto resume_normality;
if (summary->new_low_nack) {
change = rxrpc_cong_new_low_nack;
call->cong_dup_acks = 1;
if (call->cong_extra > 1)
call->cong_extra = 1;
goto send_extra_data;
}
call->cong_dup_acks++;
if (call->cong_dup_acks < 3)
goto send_extra_data;
change = rxrpc_cong_begin_retransmission;
call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
call->cong_ssthresh = max_t(unsigned int,
summary->flight_size / 2, 2);
cwnd = call->cong_ssthresh + 3;
call->cong_extra = 0;
call->cong_dup_acks = 0;
resend = true;
goto out;
case RXRPC_CALL_FAST_RETRANSMIT:
if (!summary->new_low_nack) {
if (summary->nr_new_acks == 0)
cwnd += 1;
call->cong_dup_acks++;
if (call->cong_dup_acks == 2) {
change = rxrpc_cong_retransmit_again;
call->cong_dup_acks = 0;
resend = true;
}
} else {
change = rxrpc_cong_progress;
cwnd = call->cong_ssthresh;
if (!summary->saw_nacks)
goto resume_normality;
}
goto out;
default:
BUG();
goto out;
}
resume_normality:
change = rxrpc_cong_cleared_nacks;
call->cong_dup_acks = 0;
call->cong_extra = 0;
call->cong_tstamp = skb->tstamp;
if (cwnd < call->cong_ssthresh)
call->cong_mode = RXRPC_CALL_SLOW_START;
else
call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
cumulative_acks = 0;
out_no_clear_ca:
if (cwnd >= RXRPC_TX_MAX_WINDOW)
cwnd = RXRPC_TX_MAX_WINDOW;
call->cong_cwnd = cwnd;
call->cong_cumul_acks = cumulative_acks;
trace_rxrpc_congest(call, summary, acked_serial, change);
if (resend)
rxrpc_resend(call, skb);
return;
packet_loss_detected:
change = rxrpc_cong_saw_nack;
call->cong_mode = RXRPC_CALL_PACKET_LOSS;
call->cong_dup_acks = 0;
goto send_extra_data;
send_extra_data:
/* Send some previously unsent DATA if we have some to advance the ACK
* state.
*/
if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
summary->nr_acks != call->tx_top - call->acks_hard_ack) {
call->cong_extra++;
wake_up(&call->waitq);
}
goto out_no_clear_ca;
}
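/*
 * Rough summary of the modes handled above, derived from the code:
 * RXRPC_CALL_SLOW_START grows cwnd by one per ACK until it reaches
 * cong_ssthresh or a NACK is seen; RXRPC_CALL_CONGEST_AVOIDANCE grows it by
 * at most one per RTT once enough cumulative ACKs have arrived; a NACK in
 * either mode switches to RXRPC_CALL_PACKET_LOSS, which after three
 * duplicate loss reports enters RXRPC_CALL_FAST_RETRANSMIT with
 * cong_ssthresh roughly halved against the flight size and a retransmission
 * triggered; fast retransmit drops back to slow start or congestion
 * avoidance once the NACKs clear.  This broadly mirrors the RFC 5681
 * behaviour referenced above.
 */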
/*
* Degrade the congestion window if we haven't transmitted a packet for >1RTT.
*/
void rxrpc_congestion_degrade(struct rxrpc_call *call)
{
ktime_t rtt, now;
if (call->cong_mode != RXRPC_CALL_SLOW_START &&
call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
return;
if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
return;
rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8));
now = ktime_get_real();
if (!ktime_before(ktime_add(call->tx_last_sent, rtt), now))
return;
trace_rxrpc_reset_cwnd(call, now);
rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
call->tx_last_sent = now;
call->cong_mode = RXRPC_CALL_SLOW_START;
call->cong_ssthresh = max_t(unsigned int, call->cong_ssthresh,
call->cong_cwnd * 3 / 4);
call->cong_cwnd = max_t(unsigned int, call->cong_cwnd / 2, RXRPC_MIN_CWND);
}
/*
* Apply a hard ACK by advancing the Tx window.
*/
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
struct rxrpc_ack_summary *summary)
{
struct rxrpc_txbuf *txb;
bool rot_last = false;
list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
if (before_eq(txb->seq, call->acks_hard_ack))
continue;
summary->nr_rot_new_acks++;
if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
rot_last = true;
}
if (txb->seq == to)
break;
}
if (rot_last)
set_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags);
_enter("%x,%x,%x,%d", to, call->acks_hard_ack, call->tx_top, rot_last);
if (call->acks_lowest_nak == call->acks_hard_ack) {
call->acks_lowest_nak = to;
} else if (after(to, call->acks_lowest_nak)) {
summary->new_low_nack = true;
call->acks_lowest_nak = to;
}
smp_store_release(&call->acks_hard_ack, to);
trace_rxrpc_txqueue(call, (rot_last ?
rxrpc_txqueue_rotate_last :
rxrpc_txqueue_rotate));
wake_up(&call->waitq);
return rot_last;
}
/*
* End the transmission phase of a call.
*
* This occurs when we get an ACKALL packet, the first DATA packet of a reply,
* or a final ACK packet.
*/
static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
enum rxrpc_abort_reason abort_why)
{
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
if (reply_begun) {
rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_RECV_REPLY);
trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
break;
}
rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
break;
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
break;
default:
kdebug("end_tx %s", rxrpc_call_states[__rxrpc_call_state(call)]);
rxrpc_proto_abort(call, call->tx_top, abort_why);
break;
}
}
/*
* Begin the reply reception phase of a call.
*/
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
struct rxrpc_ack_summary summary = { 0 };
unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
now = jiffies;
timo = now + MAX_JIFFY_OFFSET;
WRITE_ONCE(call->delay_ack_at, timo);
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
if (!rxrpc_rotate_tx_window(call, top, &summary)) {
rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply);
return false;
}
}
rxrpc_end_tx_phase(call, true, rxrpc_eproto_unexpected_reply);
return true;
}
/*
* End the packet reception phase.
*/
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
_enter("%d,%s", call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)]);
trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
rxrpc_call_completed(call);
break;
case RXRPC_CALL_SERVER_RECV_REQUEST:
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
break;
default:
break;
}
}
static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
rxrpc_seq_t window, rxrpc_seq_t wtop)
{
call->ackr_window = window;
call->ackr_wtop = wtop;
}
/*
* Push a DATA packet onto the Rx queue.
*/
static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_seq_t window, rxrpc_seq_t wtop,
enum rxrpc_receive_trace why)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
__skb_queue_tail(&call->recvmsg_queue, skb);
rxrpc_input_update_ack_window(call, window, wtop);
trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
if (last)
rxrpc_end_rx_phase(call, sp->hdr.serial);
}
/*
* Process a DATA packet.
*/
static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
bool *_notify)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct sk_buff *oos;
rxrpc_serial_t serial = sp->hdr.serial;
unsigned int sack = call->ackr_sack_base;
rxrpc_seq_t window = call->ackr_window;
rxrpc_seq_t wtop = call->ackr_wtop;
rxrpc_seq_t wlimit = window + call->rx_winsize - 1;
rxrpc_seq_t seq = sp->hdr.seq;
bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
int ack_reason = -1;
rxrpc_inc_stat(call->rxnet, stat_rx_data);
if (sp->hdr.flags & RXRPC_REQUEST_ACK)
rxrpc_inc_stat(call->rxnet, stat_rx_data_reqack);
if (sp->hdr.flags & RXRPC_JUMBO_PACKET)
rxrpc_inc_stat(call->rxnet, stat_rx_data_jumbo);
if (last) {
if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
seq + 1 != wtop)
return rxrpc_proto_abort(call, seq, rxrpc_eproto_different_last);
} else {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
after_eq(seq, wtop)) {
pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
call->debug_id, seq, window, wtop, wlimit);
return rxrpc_proto_abort(call, seq, rxrpc_eproto_data_after_last);
}
}
if (after(seq, call->rx_highest_seq))
call->rx_highest_seq = seq;
trace_rxrpc_rx_data(call->debug_id, seq, serial, sp->hdr.flags);
if (before(seq, window)) {
ack_reason = RXRPC_ACK_DUPLICATE;
goto send_ack;
}
if (after(seq, wlimit)) {
ack_reason = RXRPC_ACK_EXCEEDS_WINDOW;
goto send_ack;
}
/* Queue the packet. */
if (seq == window) {
if (sp->hdr.flags & RXRPC_REQUEST_ACK)
ack_reason = RXRPC_ACK_REQUESTED;
/* Send an immediate ACK if we fill in a hole */
else if (!skb_queue_empty(&call->rx_oos_queue))
ack_reason = RXRPC_ACK_DELAY;
else
call->ackr_nr_unacked++;
window++;
if (after(window, wtop)) {
trace_rxrpc_sack(call, seq, sack, rxrpc_sack_none);
wtop = window;
} else {
trace_rxrpc_sack(call, seq, sack, rxrpc_sack_advance);
sack = (sack + 1) % RXRPC_SACK_SIZE;
}
rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg);
spin_lock(&call->recvmsg_queue.lock);
rxrpc_input_queue_data(call, skb, window, wtop, rxrpc_receive_queue);
*_notify = true;
while ((oos = skb_peek(&call->rx_oos_queue))) {
struct rxrpc_skb_priv *osp = rxrpc_skb(oos);
if (after(osp->hdr.seq, window))
break;
__skb_unlink(oos, &call->rx_oos_queue);
last = osp->hdr.flags & RXRPC_LAST_PACKET;
seq = osp->hdr.seq;
call->ackr_sack_table[sack] = 0;
trace_rxrpc_sack(call, seq, sack, rxrpc_sack_fill);
sack = (sack + 1) % RXRPC_SACK_SIZE;
window++;
rxrpc_input_queue_data(call, oos, window, wtop,
rxrpc_receive_queue_oos);
}
spin_unlock(&call->recvmsg_queue.lock);
call->ackr_sack_base = sack;
} else {
unsigned int slot;
ack_reason = RXRPC_ACK_OUT_OF_SEQUENCE;
slot = seq - window;
sack = (sack + slot) % RXRPC_SACK_SIZE;
if (call->ackr_sack_table[sack % RXRPC_SACK_SIZE]) {
ack_reason = RXRPC_ACK_DUPLICATE;
goto send_ack;
}
call->ackr_sack_table[sack % RXRPC_SACK_SIZE] |= 1;
trace_rxrpc_sack(call, seq, sack, rxrpc_sack_oos);
if (after(seq + 1, wtop)) {
wtop = seq + 1;
rxrpc_input_update_ack_window(call, window, wtop);
}
skb_queue_walk(&call->rx_oos_queue, oos) {
struct rxrpc_skb_priv *osp = rxrpc_skb(oos);
if (after(osp->hdr.seq, seq)) {
rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg_oos);
__skb_queue_before(&call->rx_oos_queue, oos, skb);
goto oos_queued;
}
}
rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg_oos);
__skb_queue_tail(&call->rx_oos_queue, skb);
oos_queued:
trace_rxrpc_receive(call, last ? rxrpc_receive_oos_last : rxrpc_receive_oos,
sp->hdr.serial, sp->hdr.seq);
}
send_ack:
if (ack_reason >= 0)
rxrpc_send_ACK(call, ack_reason, serial,
rxrpc_propose_ack_input_data);
else
rxrpc_propose_delay_ACK(call, serial,
rxrpc_propose_ack_input_data);
}
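/*
 * Bookkeeping note, derived from the code above: ackr_window is the next
 * sequence number expected in order, ackr_wtop is one past the highest
 * sequence seen so far, and ackr_sack_table is a small ring, indexed from
 * ackr_sack_base, recording which out-of-order packets in between have
 * already arrived.  In-order arrivals advance the window and drain
 * rx_oos_queue; arrivals beyond the window are parked on rx_oos_queue and
 * marked in the ring.
 */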
/*
* Split a jumbo packet and file the bits separately.
*/
static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_jumbo_header jhdr;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp;
struct sk_buff *jskb;
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int len = skb->len - offset;
bool notify = false;
while (sp->hdr.flags & RXRPC_JUMBO_PACKET) {
if (len < RXRPC_JUMBO_SUBPKTLEN)
goto protocol_error;
if (sp->hdr.flags & RXRPC_LAST_PACKET)
goto protocol_error;
if (skb_copy_bits(skb, offset + RXRPC_JUMBO_DATALEN,
&jhdr, sizeof(jhdr)) < 0)
goto protocol_error;
jskb = skb_clone(skb, GFP_NOFS);
if (!jskb) {
kdebug("couldn't clone");
return false;
}
rxrpc_new_skb(jskb, rxrpc_skb_new_jumbo_subpacket);
jsp = rxrpc_skb(jskb);
jsp->offset = offset;
jsp->len = RXRPC_JUMBO_DATALEN;
rxrpc_input_data_one(call, jskb, ¬ify);
rxrpc_free_skb(jskb, rxrpc_skb_put_jumbo_subpacket);
sp->hdr.flags = jhdr.flags;
sp->hdr._rsvd = ntohs(jhdr._rsvd);
sp->hdr.seq++;
sp->hdr.serial++;
offset += RXRPC_JUMBO_SUBPKTLEN;
len -= RXRPC_JUMBO_SUBPKTLEN;
}
sp->offset = offset;
sp->len = len;
rxrpc_input_data_one(call, skb, ¬ify);
if (notify) {
trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
rxrpc_notify_socket(call);
}
return true;
protocol_error:
return false;
}
/*
* Process a DATA packet, adding the packet to the Rx ring. The caller's
* packet ref must be passed on or discarded.
*/
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
rxrpc_serial_t serial = sp->hdr.serial;
rxrpc_seq_t seq0 = sp->hdr.seq;
_enter("{%x,%x,%x},{%u,%x}",
call->ackr_window, call->ackr_wtop, call->rx_highest_seq,
skb->len, seq0);
if (__rxrpc_call_is_complete(call))
return;
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
/* Received data implicitly ACKs all of the request
* packets we sent when we're acting as a client.
*/
if (!rxrpc_receiving_reply(call))
goto out_notify;
break;
case RXRPC_CALL_SERVER_RECV_REQUEST: {
unsigned long timo = READ_ONCE(call->next_req_timo);
unsigned long now, expect_req_by;
if (timo) {
now = jiffies;
expect_req_by = now + timo;
WRITE_ONCE(call->expect_req_by, expect_req_by);
rxrpc_reduce_call_timer(call, expect_req_by, now,
rxrpc_timer_set_for_idle);
}
break;
}
default:
break;
}
if (!rxrpc_input_split_jumbo(call, skb)) {
rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo);
goto out_notify;
}
return;
out_notify:
trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
_leave(" [queued]");
}
/*
* See if there's a cached RTT probe to complete.
*/
static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
ktime_t resp_time,
rxrpc_serial_t acked_serial,
rxrpc_serial_t ack_serial,
enum rxrpc_rtt_rx_trace type)
{
rxrpc_serial_t orig_serial;
unsigned long avail;
ktime_t sent_at;
bool matched = false;
int i;
avail = READ_ONCE(call->rtt_avail);
smp_rmb(); /* Read avail bits before accessing data. */
for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
continue;
sent_at = call->rtt_sent_at[i];
orig_serial = call->rtt_serial[i];
if (orig_serial == acked_serial) {
clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
smp_mb(); /* Read data before setting avail bit */
set_bit(i, &call->rtt_avail);
if (type != rxrpc_rtt_rx_cancel)
rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
sent_at, resp_time);
else
trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
orig_serial, acked_serial, 0, 0);
matched = true;
}
/* If a later serial is being acked, then mark this slot as
* being available.
*/
if (after(acked_serial, orig_serial)) {
trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
orig_serial, acked_serial, 0, 0);
clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
smp_wmb();
set_bit(i, &call->rtt_avail);
}
}
if (!matched)
trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
}
/*
* Process the extra information that may be appended to an ACK packet
*/
static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_ackinfo *ackinfo)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_peer *peer;
unsigned int mtu;
bool wake = false;
u32 rwind = ntohl(ackinfo->rwind);
if (rwind > RXRPC_TX_MAX_WINDOW)
rwind = RXRPC_TX_MAX_WINDOW;
if (call->tx_winsize != rwind) {
if (rwind > call->tx_winsize)
wake = true;
trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
call->tx_winsize = rwind;
}
if (call->cong_ssthresh > rwind)
call->cong_ssthresh = rwind;
mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
peer = call->peer;
if (mtu < peer->maxdata) {
spin_lock(&peer->lock);
peer->maxdata = mtu;
peer->mtu = mtu + peer->hdrsize;
spin_unlock(&peer->lock);
}
if (wake)
wake_up(&call->waitq);
}
/*
* Process individual soft ACKs.
*
* Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend immediately;
* packets that lie beyond the end of the ACK list are scheduled for resend by
* the timer on the basis that the peer might just not have processed them at
* the time the ACK was sent.
*/
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
rxrpc_seq_t seq, int nr_acks,
struct rxrpc_ack_summary *summary)
{
unsigned int i;
for (i = 0; i < nr_acks; i++) {
if (acks[i] == RXRPC_ACK_TYPE_ACK) {
summary->nr_acks++;
summary->nr_new_acks++;
} else {
if (!summary->saw_nacks &&
call->acks_lowest_nak != seq + i) {
call->acks_lowest_nak = seq + i;
summary->new_low_nack = true;
}
summary->saw_nacks = true;
}
}
}
/*
* Return true if the ACK is valid - ie. it doesn't appear to have regressed
* with respect to the ack state conveyed by preceding ACKs.
*/
static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
{
rxrpc_seq_t base = READ_ONCE(call->acks_first_seq);
if (after(first_pkt, base))
return true; /* The window advanced */
if (before(first_pkt, base))
return false; /* firstPacket regressed */
if (after_eq(prev_pkt, call->acks_prev_seq))
return true; /* previousPacket hasn't regressed. */
/* Some rx implementations put a serial number in previousPacket. */
if (after_eq(prev_pkt, base + call->tx_winsize))
return false;
return true;
}
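/*
 * Illustrative example for the firstPacket check above: if acks_first_seq is
 * currently 5, an ACK carrying firstPacket 6 is accepted outright (the
 * window advanced) and one carrying firstPacket 4 is discarded as a
 * regression; only when firstPacket is exactly 5 is previousPacket examined
 * for regression.
 */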
/*
* Process an ACK packet.
*
* ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
* in the ACK array. Anything before that is hard-ACK'd and may be discarded.
*
* A hard-ACK means that a packet has been processed and may be discarded; a
* soft-ACK means that the packet may be discarded and retransmission
* requested. A phase is complete when all packets are hard-ACK'd.
*/
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_ackinfo info;
rxrpc_serial_t ack_serial, acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
_enter("");
offset = sizeof(struct rxrpc_wire_header);
if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
offset += sizeof(ack);
ack_serial = sp->hdr.serial;
acked_serial = ntohl(ack.serial);
first_soft_ack = ntohl(ack.firstPacket);
prev_pkt = ntohl(ack.previousPacket);
hard_ack = first_soft_ack - 1;
nr_acks = ack.nAcks;
summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
ack.reason : RXRPC_ACK__INVALID);
trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
switch (ack.reason) {
case RXRPC_ACK_PING_RESPONSE:
rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
rxrpc_rtt_rx_ping_response);
break;
case RXRPC_ACK_REQUESTED:
rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
rxrpc_rtt_rx_requested_ack);
break;
default:
if (acked_serial != 0)
rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
rxrpc_rtt_rx_cancel);
break;
}
if (ack.reason == RXRPC_ACK_PING) {
rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
rxrpc_propose_ack_respond_to_ping);
} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
rxrpc_propose_ack_respond_to_ack);
}
/* If we get an EXCEEDS_WINDOW ACK from the server, it probably
* indicates that the client address changed due to NAT. The server
* lost the call because it switched to a different peer.
*/
if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
rxrpc_is_client_call(call)) {
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
0, -ENETRESET);
return;
}
/* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
* indicate a change of address. However, we can retransmit the call
* if we still have it buffered to the beginning.
*/
if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
call->acks_hard_ack == 0 &&
rxrpc_is_client_call(call)) {
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
0, -ENETRESET);
return;
}
/* Discard any out-of-order or duplicate ACKs (outside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->acks_first_seq,
prev_pkt, call->acks_prev_seq);
return;
}
info.rxMTU = 0;
ioffset = offset + nr_acks + 3;
if (skb->len >= ioffset + sizeof(info) &&
skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
if (nr_acks > 0)
skb_condense(skb);
call->acks_latest_ts = skb->tstamp;
call->acks_first_seq = first_soft_ack;
call->acks_prev_seq = prev_pkt;
switch (ack.reason) {
case RXRPC_ACK_PING:
break;
default:
if (after(acked_serial, call->acks_highest_serial))
call->acks_highest_serial = acked_serial;
break;
}
/* Parse rwind and mtu sizes if provided. */
if (info.rxMTU)
rxrpc_input_ackinfo(call, skb, &info);
if (first_soft_ack == 0)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
/* Ignore ACKs unless we are or have just been transmitting. */
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
case RXRPC_CALL_SERVER_SEND_REPLY:
case RXRPC_CALL_SERVER_AWAIT_ACK:
break;
default:
return;
}
if (before(hard_ack, call->acks_hard_ack) ||
after(hard_ack, call->tx_top))
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
if (nr_acks > call->tx_top - hard_ack)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);
if (after(hard_ack, call->acks_hard_ack)) {
if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
return;
}
}
if (nr_acks > 0) {
if (offset > (int)skb->len - nr_acks)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
nr_acks, &summary);
}
if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
rxrpc_propose_ping(call, ack_serial,
rxrpc_propose_ack_ping_for_lost_reply);
rxrpc_congestion_management(call, skb, &summary, acked_serial);
}
/*
* Process an ACKALL packet.
*/
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall);
}
/*
* Process an ABORT packet directed at a call.
*/
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
trace_rxrpc_rx_abort(call, sp->hdr.serial, skb->priority);
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
skb->priority, -ECONNABORTED);
}
/*
* Process an incoming call packet.
*/
void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned long timo;
_enter("%p,%p", call, skb);
if (sp->hdr.serviceId != call->dest_srx.srx_service)
call->dest_srx.srx_service = sp->hdr.serviceId;
if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
call->rx_serial = sp->hdr.serial;
if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
timo = READ_ONCE(call->next_rx_timo);
if (timo) {
unsigned long now = jiffies, expect_rx_by;
expect_rx_by = now + timo;
WRITE_ONCE(call->expect_rx_by, expect_rx_by);
rxrpc_reduce_call_timer(call, expect_rx_by, now,
rxrpc_timer_set_for_normal);
}
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
return rxrpc_input_data(call, skb);
case RXRPC_PACKET_TYPE_ACK:
return rxrpc_input_ack(call, skb);
case RXRPC_PACKET_TYPE_BUSY:
/* Just ignore BUSY packets from the server; the retry and
* lifespan timers will take care of business. BUSY packets
* from the client don't make sense.
*/
return;
case RXRPC_PACKET_TYPE_ABORT:
return rxrpc_input_abort(call, skb);
case RXRPC_PACKET_TYPE_ACKALL:
return rxrpc_input_ackall(call, skb);
default:
break;
}
}
/*
* Handle a new service call on a channel implicitly completing the preceding
* call on that channel. This does not apply to client conns.
*
* TODO: If callNumber > call_id + 1, renegotiate security.
*/
void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
{
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
fallthrough;
case RXRPC_CALL_COMPLETE:
break;
default:
rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ESHUTDOWN,
rxrpc_eproto_improper_term);
trace_rxrpc_improper_term(call);
break;
}
rxrpc_input_call_event(call, skb);
}
| linux-master | net/rxrpc/input.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Utility routines
*
* Copyright (C) 2015 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include "ar-internal.h"
/*
* Fill out a peer address from a socket buffer containing a packet.
*/
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
{
memset(srx, 0, sizeof(*srx));
switch (ntohs(skb->protocol)) {
case ETH_P_IP:
srx->transport_type = SOCK_DGRAM;
srx->transport_len = sizeof(srx->transport.sin);
srx->transport.sin.sin_family = AF_INET;
srx->transport.sin.sin_port = udp_hdr(skb)->source;
srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
return 0;
#ifdef CONFIG_AF_RXRPC_IPV6
case ETH_P_IPV6:
srx->transport_type = SOCK_DGRAM;
srx->transport_len = sizeof(srx->transport.sin6);
srx->transport.sin6.sin6_family = AF_INET6;
srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
srx->transport.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
return 0;
#endif
default:
pr_warn_ratelimited("AF_RXRPC: Unknown eth protocol %u\n",
ntohs(skb->protocol));
return -EAFNOSUPPORT;
}
}
| linux-master | net/rxrpc/utils.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC sendmsg() implementation.
*
* Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
* Propose an abort to be made in the I/O thread.
*/
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
enum rxrpc_abort_reason why)
{
_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
if (!call->send_abort && !rxrpc_call_is_complete(call)) {
call->send_abort_why = why;
call->send_abort_err = error;
call->send_abort_seq = 0;
/* Request abort locklessly vs rxrpc_input_call_event(). */
smp_store_release(&call->send_abort, abort_code);
rxrpc_poke_call(call, rxrpc_call_poke_abort);
return true;
}
return false;
}
/*
* Wait for a call to become connected. Interruption here doesn't cause the
* call to be aborted.
*/
static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
{
DECLARE_WAITQUEUE(myself, current);
int ret = 0;
_enter("%d", call->debug_id);
if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
goto no_wait;
add_wait_queue_exclusive(&call->waitq, &myself);
for (;;) {
switch (call->interruptibility) {
case RXRPC_INTERRUPTIBLE:
case RXRPC_PREINTERRUPTIBLE:
set_current_state(TASK_INTERRUPTIBLE);
break;
case RXRPC_UNINTERRUPTIBLE:
default:
set_current_state(TASK_UNINTERRUPTIBLE);
break;
}
if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
break;
if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
signal_pending(current)) {
ret = sock_intr_errno(*timeo);
break;
}
*timeo = schedule_timeout(*timeo);
}
remove_wait_queue(&call->waitq, &myself);
__set_current_state(TASK_RUNNING);
no_wait:
if (ret == 0 && rxrpc_call_is_complete(call))
ret = call->error;
_leave(" = %d", ret);
return ret;
}
/*
* Return true if there's sufficient Tx queue space.
*/
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
if (_tx_win)
*_tx_win = call->tx_bottom;
return call->tx_prepared - call->tx_bottom < 256;
}
/*
* Wait for space to appear in the Tx queue or a signal to occur.
*/
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
struct rxrpc_call *call,
long *timeo)
{
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (rxrpc_check_tx_space(call, NULL))
return 0;
if (rxrpc_call_is_complete(call))
return call->error;
if (signal_pending(current))
return sock_intr_errno(*timeo);
trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
*timeo = schedule_timeout(*timeo);
}
}
/*
* Wait for space to appear in the Tx queue uninterruptibly, but with
* a timeout of 2*RTT if no progress was made and a signal occurred.
*/
static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
struct rxrpc_call *call)
{
rxrpc_seq_t tx_start, tx_win;
signed long rtt, timeout;
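	/* peer->srtt_us holds the smoothed RTT scaled by 8 (TCP-style), so the
	 * shift right by 3 below recovers the RTT in microseconds before it
	 * is converted to jiffies and doubled.
	 */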
rtt = READ_ONCE(call->peer->srtt_us) >> 3;
rtt = usecs_to_jiffies(rtt) * 2;
if (rtt < 2)
rtt = 2;
timeout = rtt;
tx_start = smp_load_acquire(&call->acks_hard_ack);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (rxrpc_check_tx_space(call, &tx_win))
return 0;
if (rxrpc_call_is_complete(call))
return call->error;
if (timeout == 0 &&
tx_win == tx_start && signal_pending(current))
return -EINTR;
if (tx_win != tx_start) {
timeout = rtt;
tx_start = tx_win;
}
trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
timeout = schedule_timeout(timeout);
}
}
/*
* Wait for space to appear in the Tx queue uninterruptibly.
*/
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
struct rxrpc_call *call,
long *timeo)
{
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (rxrpc_check_tx_space(call, NULL))
return 0;
if (rxrpc_call_is_complete(call))
return call->error;
trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
*timeo = schedule_timeout(*timeo);
}
}
/*
* wait for space to appear in the transmit/ACK window
* - caller holds the socket locked
*/
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
struct rxrpc_call *call,
long *timeo,
bool waitall)
{
DECLARE_WAITQUEUE(myself, current);
int ret;
_enter(",{%u,%u,%u,%u}",
call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);
add_wait_queue(&call->waitq, &myself);
switch (call->interruptibility) {
case RXRPC_INTERRUPTIBLE:
if (waitall)
ret = rxrpc_wait_for_tx_window_waitall(rx, call);
else
ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
break;
case RXRPC_PREINTERRUPTIBLE:
case RXRPC_UNINTERRUPTIBLE:
default:
ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
break;
}
remove_wait_queue(&call->waitq, &myself);
set_current_state(TASK_RUNNING);
_leave(" = %d", ret);
return ret;
}
/*
* Notify the owner of the call that the transmit phase is ended and the last
* packet has been queued.
*/
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
rxrpc_notify_end_tx_t notify_end_tx)
{
if (notify_end_tx)
notify_end_tx(&rx->sk, call, call->user_call_ID);
}
/*
 * Queue a DATA packet on the call's sendmsg queue and poke the I/O thread to
 * transmit it.  The I/O thread handles the actual transmission and the resend
 * timeout.
*/
static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
struct rxrpc_txbuf *txb,
rxrpc_notify_end_tx_t notify_end_tx)
{
rxrpc_seq_t seq = txb->seq;
bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
rxrpc_inc_stat(call->rxnet, stat_tx_data);
ASSERTCMP(txb->seq, ==, call->tx_prepared + 1);
/* We have to set the timestamp before queueing as the retransmit
* algorithm can see the packet as soon as we queue it.
*/
txb->last_sent = ktime_get_real();
if (last)
trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
else
trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);
/* Add the packet to the call's output buffer */
spin_lock(&call->tx_lock);
poke = list_empty(&call->tx_sendmsg);
list_add_tail(&txb->call_link, &call->tx_sendmsg);
call->tx_prepared = seq;
if (last)
rxrpc_notify_end_tx(rx, call, notify_end_tx);
spin_unlock(&call->tx_lock);
if (poke)
rxrpc_poke_call(call, rxrpc_call_poke_start);
}
/*
* send data through a socket
* - must be called in process context
* - The caller holds the call user access mutex, but not the socket lock.
*/
static int rxrpc_send_data(struct rxrpc_sock *rx,
struct rxrpc_call *call,
struct msghdr *msg, size_t len,
rxrpc_notify_end_tx_t notify_end_tx,
bool *_dropped_lock)
{
struct rxrpc_txbuf *txb;
struct sock *sk = &rx->sk;
enum rxrpc_call_state state;
long timeo;
bool more = msg->msg_flags & MSG_MORE;
int ret, copied = 0;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
ret = rxrpc_wait_to_be_connected(call, &timeo);
if (ret < 0)
return ret;
if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
ret = rxrpc_init_client_conn_security(call->conn);
if (ret < 0)
return ret;
}
/* this should be in poll */
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
reload:
ret = -EPIPE;
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto maybe_error;
state = rxrpc_call_state(call);
ret = -ESHUTDOWN;
if (state >= RXRPC_CALL_COMPLETE)
goto maybe_error;
ret = -EPROTO;
if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
state != RXRPC_CALL_SERVER_ACK_REQUEST &&
state != RXRPC_CALL_SERVER_SEND_REPLY) {
/* Request phase complete for this client call */
trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
call->cid, call->call_id, call->rx_consumed,
0, -EPROTO);
goto maybe_error;
}
ret = -EMSGSIZE;
if (call->tx_total_len != -1) {
if (len - copied > call->tx_total_len)
goto maybe_error;
if (!more && len - copied != call->tx_total_len)
goto maybe_error;
}
txb = call->tx_pending;
call->tx_pending = NULL;
if (txb)
rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);
do {
if (!txb) {
size_t remain, bufsize, chunk, offset;
_debug("alloc");
if (!rxrpc_check_tx_space(call, NULL))
goto wait_for_space;
/* Work out the maximum size of a packet. Assume that
* the security header is going to be in the padded
* region (enc blocksize), but the trailer is not.
*/
remain = more ? INT_MAX : msg_data_left(msg);
ret = call->conn->security->how_much_data(call, remain,
&bufsize, &chunk, &offset);
if (ret < 0)
goto maybe_error;
_debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);
/* create a buffer that we can retain until it's ACK'd */
ret = -ENOMEM;
txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_DATA,
GFP_KERNEL);
if (!txb)
goto maybe_error;
txb->offset = offset;
txb->space -= offset;
txb->space = min_t(size_t, chunk, txb->space);
}
_debug("append");
/* append next segment of data to the current buffer */
if (msg_data_left(msg) > 0) {
size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
_debug("add %zu", copy);
if (!copy_from_iter_full(txb->data + txb->offset, copy,
&msg->msg_iter))
goto efault;
_debug("added");
txb->space -= copy;
txb->len += copy;
txb->offset += copy;
copied += copy;
if (call->tx_total_len != -1)
call->tx_total_len -= copy;
}
/* check for the far side aborting the call or a network error
* occurring */
if (rxrpc_call_is_complete(call))
goto call_terminated;
/* add the packet to the send queue if it's now full */
if (!txb->space ||
(msg_data_left(msg) == 0 && !more)) {
if (msg_data_left(msg) == 0 && !more) {
txb->wire.flags |= RXRPC_LAST_PACKET;
__set_bit(RXRPC_TXBUF_LAST, &txb->flags);
}
else if (call->tx_top - call->acks_hard_ack <
call->tx_winsize)
txb->wire.flags |= RXRPC_MORE_PACKETS;
ret = call->security->secure_packet(call, txb);
if (ret < 0)
goto out;
rxrpc_queue_packet(rx, call, txb, notify_end_tx);
txb = NULL;
}
} while (msg_data_left(msg) > 0);
success:
ret = copied;
if (rxrpc_call_is_complete(call) &&
call->error < 0)
ret = call->error;
out:
call->tx_pending = txb;
_leave(" = %d", ret);
return ret;
call_terminated:
rxrpc_put_txbuf(txb, rxrpc_txbuf_put_send_aborted);
_leave(" = %d", call->error);
return call->error;
maybe_error:
if (copied)
goto success;
goto out;
efault:
ret = -EFAULT;
goto out;
wait_for_space:
ret = -EAGAIN;
if (msg->msg_flags & MSG_DONTWAIT)
goto maybe_error;
mutex_unlock(&call->user_mutex);
*_dropped_lock = true;
ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
msg->msg_flags & MSG_WAITALL);
if (ret < 0)
goto maybe_error;
if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
if (mutex_lock_interruptible(&call->user_mutex) < 0) {
ret = sock_intr_errno(timeo);
goto maybe_error;
}
} else {
mutex_lock(&call->user_mutex);
}
*_dropped_lock = false;
goto reload;
}
/*
* extract control messages from the sendmsg() control buffer
*/
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
struct cmsghdr *cmsg;
bool got_user_ID = false;
int len;
if (msg->msg_controllen == 0)
return -EINVAL;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
len = cmsg->cmsg_len - sizeof(struct cmsghdr);
_debug("CMSG %d, %d, %d",
cmsg->cmsg_level, cmsg->cmsg_type, len);
if (cmsg->cmsg_level != SOL_RXRPC)
continue;
switch (cmsg->cmsg_type) {
case RXRPC_USER_CALL_ID:
if (msg->msg_flags & MSG_CMSG_COMPAT) {
if (len != sizeof(u32))
return -EINVAL;
p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
} else {
if (len != sizeof(unsigned long))
return -EINVAL;
p->call.user_call_ID = *(unsigned long *)
CMSG_DATA(cmsg);
}
got_user_ID = true;
break;
case RXRPC_ABORT:
if (p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL;
p->command = RXRPC_CMD_SEND_ABORT;
if (len != sizeof(p->abort_code))
return -EINVAL;
p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
if (p->abort_code == 0)
return -EINVAL;
break;
case RXRPC_CHARGE_ACCEPT:
if (p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL;
p->command = RXRPC_CMD_CHARGE_ACCEPT;
if (len != 0)
return -EINVAL;
break;
case RXRPC_EXCLUSIVE_CALL:
p->exclusive = true;
if (len != 0)
return -EINVAL;
break;
case RXRPC_UPGRADE_SERVICE:
p->upgrade = true;
if (len != 0)
return -EINVAL;
break;
case RXRPC_TX_LENGTH:
if (p->call.tx_total_len != -1 || len != sizeof(__s64))
return -EINVAL;
p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
if (p->call.tx_total_len < 0)
return -EINVAL;
break;
case RXRPC_SET_CALL_TIMEOUT:
if (len & 3 || len < 4 || len > 12)
return -EINVAL;
memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
p->call.nr_timeouts = len / 4;
if (p->call.timeouts.hard > INT_MAX / HZ)
return -ERANGE;
if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
return -ERANGE;
if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
return -ERANGE;
break;
default:
return -EINVAL;
}
}
if (!got_user_ID)
return -EINVAL;
if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL;
_leave(" = 0");
return 0;
}
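/* Illustrative sketch (not part of the original file): from userspace, the
 * control messages parsed above are attached to sendmsg() roughly as below;
 * the buffer names and the call ID value are hypothetical.
 *
 *	unsigned long user_call_id = 1;
 *	char ctrl[CMSG_SPACE(sizeof(user_call_id))];
 *	struct msghdr msg = {
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(user_call_id));
 *	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));
 */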
/*
* Create a new client call for sendmsg().
* - Called with the socket lock held, which it must release.
* - If it returns a call, the call's lock will need releasing by the caller.
*/
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
struct rxrpc_send_params *p)
__releases(&rx->sk.sk_lock.slock)
__acquires(&call->user_mutex)
{
struct rxrpc_conn_parameters cp;
struct rxrpc_call *call;
struct key *key;
DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);
_enter("");
if (!msg->msg_name) {
release_sock(&rx->sk);
return ERR_PTR(-EDESTADDRREQ);
}
key = rx->key;
if (key && !rx->key->payload.data[0])
key = NULL;
memset(&cp, 0, sizeof(cp));
cp.local = rx->local;
cp.key = rx->key;
cp.security_level = rx->min_sec_level;
cp.exclusive = rx->exclusive | p->exclusive;
cp.upgrade = p->upgrade;
cp.service_id = srx->srx_service;
call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
atomic_inc_return(&rxrpc_debug_id));
/* The socket is now unlocked */
_leave(" = %p\n", call);
return call;
}
/*
* send a message forming part of a client call through an RxRPC socket
* - caller holds the socket locked
* - the socket may be either a client socket or a server socket
*/
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call;
unsigned long now, j;
bool dropped_lock = false;
int ret;
struct rxrpc_send_params p = {
.call.tx_total_len = -1,
.call.user_call_ID = 0,
.call.nr_timeouts = 0,
.call.interruptibility = RXRPC_INTERRUPTIBLE,
.abort_code = 0,
.command = RXRPC_CMD_SEND_DATA,
.exclusive = false,
.upgrade = false,
};
_enter("");
ret = rxrpc_sendmsg_cmsg(msg, &p);
if (ret < 0)
goto error_release_sock;
if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
ret = -EINVAL;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
goto error_release_sock;
ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
goto error_release_sock;
}
call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
if (!call) {
ret = -EBADSLT;
if (p.command != RXRPC_CMD_SEND_DATA)
goto error_release_sock;
call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
/* The socket is now unlocked... */
if (IS_ERR(call))
return PTR_ERR(call);
/* ... and we have the call lock. */
p.call.nr_timeouts = 0;
ret = 0;
if (rxrpc_call_is_complete(call))
goto out_put_unlock;
} else {
switch (rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_AWAIT_CONN:
case RXRPC_CALL_SERVER_SECURING:
if (p.command == RXRPC_CMD_SEND_ABORT)
break;
fallthrough;
case RXRPC_CALL_UNINITIALISED:
case RXRPC_CALL_SERVER_PREALLOC:
rxrpc_put_call(call, rxrpc_call_put_sendmsg);
ret = -EBUSY;
goto error_release_sock;
default:
break;
}
ret = mutex_lock_interruptible(&call->user_mutex);
release_sock(&rx->sk);
if (ret < 0) {
ret = -ERESTARTSYS;
goto error_put;
}
if (p.call.tx_total_len != -1) {
ret = -EINVAL;
if (call->tx_total_len != -1 ||
call->tx_pending ||
call->tx_top != 0)
goto out_put_unlock;
call->tx_total_len = p.call.tx_total_len;
}
}
switch (p.call.nr_timeouts) {
case 3:
j = msecs_to_jiffies(p.call.timeouts.normal);
if (p.call.timeouts.normal > 0 && j == 0)
j = 1;
WRITE_ONCE(call->next_rx_timo, j);
fallthrough;
case 2:
j = msecs_to_jiffies(p.call.timeouts.idle);
if (p.call.timeouts.idle > 0 && j == 0)
j = 1;
WRITE_ONCE(call->next_req_timo, j);
fallthrough;
case 1:
if (p.call.timeouts.hard > 0) {
j = p.call.timeouts.hard * HZ;
now = jiffies;
j += now;
WRITE_ONCE(call->expect_term_by, j);
rxrpc_reduce_call_timer(call, j, now,
rxrpc_timer_set_for_hard);
}
break;
}
if (rxrpc_call_is_complete(call)) {
/* it's too late for this call */
ret = -ESHUTDOWN;
} else if (p.command == RXRPC_CMD_SEND_ABORT) {
rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
rxrpc_abort_call_sendmsg);
ret = 0;
} else if (p.command != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
} else {
ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
}
out_put_unlock:
if (!dropped_lock)
mutex_unlock(&call->user_mutex);
error_put:
rxrpc_put_call(call, rxrpc_call_put_sendmsg);
_leave(" = %d", ret);
return ret;
error_release_sock:
release_sock(&rx->sk);
return ret;
}
/**
* rxrpc_kernel_send_data - Allow a kernel service to send data on a call
* @sock: The socket the call is on
* @call: The call to send data through
* @msg: The data to send
* @len: The amount of data to send
* @notify_end_tx: Notification that the last packet is queued.
*
 * Allow a kernel service to send data on a call.  The call must be in a state
* appropriate to sending data. No control data should be supplied in @msg,
* nor should an address be supplied. MSG_MORE should be flagged if there's
* more data to come, otherwise this data will end the transmission phase.
*/
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
struct msghdr *msg, size_t len,
rxrpc_notify_end_tx_t notify_end_tx)
{
bool dropped_lock = false;
int ret;
_enter("{%d},", call->debug_id);
ASSERTCMP(msg->msg_name, ==, NULL);
ASSERTCMP(msg->msg_control, ==, NULL);
mutex_lock(&call->user_mutex);
ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
notify_end_tx, &dropped_lock);
if (ret == -ESHUTDOWN)
ret = call->error;
if (!dropped_lock)
mutex_unlock(&call->user_mutex);
_leave(" = %d", ret);
return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);
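/* Illustrative sketch (an assumption, not from the original file): a kernel
 * service would typically feed a kvec-backed iov_iter to this function;
 * "sock", "call", "buf" and "len" below are hypothetical caller-owned
 * objects.
 *
 *	struct msghdr msg = { .msg_flags = MSG_WAITALL };
 *	struct kvec iov = { .iov_base = buf, .iov_len = len };
 *	int ret;
 *
 *	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len);
 *	ret = rxrpc_kernel_send_data(sock, call, &msg, len, NULL);
 */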
/**
* rxrpc_kernel_abort_call - Allow a kernel service to abort a call
* @sock: The socket the call is on
* @call: The call to be aborted
* @abort_code: The abort code to stick into the ABORT packet
* @error: Local error value
* @why: Indication as to why.
*
 * Allow a kernel service to abort a call if it's still in an abortable state.
 * Returns true if the call was aborted, false if it was already complete.
*/
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
u32 abort_code, int error, enum rxrpc_abort_reason why)
{
bool aborted;
_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
mutex_lock(&call->user_mutex);
aborted = rxrpc_propose_abort(call, abort_code, error, why);
mutex_unlock(&call->user_mutex);
return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
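/* Illustrative sketch (an assumption): a kernel service might abort a call
 * that has locally timed out; RX_USER_ABORT is used here purely as an example
 * abort code and the variables are hypothetical.
 *
 *	if (rxrpc_kernel_abort_call(sock, call, RX_USER_ABORT, -ETIMEDOUT,
 *				    rxrpc_abort_call_sendmsg))
 *		pr_debug("call aborted locally\n");
 */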
/**
* rxrpc_kernel_set_tx_length - Set the total Tx length on a call
* @sock: The socket the call is on
* @call: The call to be informed
* @tx_total_len: The amount of data to be transmitted for this call
*
* Allow a kernel service to set the total transmit length on a call. This
* allows buffer-to-packet encrypt-and-copy to be performed.
*
* This function is primarily for use for setting the reply length since the
* request length can be set when beginning the call.
*/
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
s64 tx_total_len)
{
WARN_ON(call->tx_total_len != -1);
call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
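/* Illustrative sketch (an assumption): since tx_total_len may only be set
 * while it is still -1, a server-side caller would set the reply length once,
 * before the first sendmsg of the reply phase, e.g.:
 *
 *	rxrpc_kernel_set_tx_length(sock, call, reply_size);
 */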
| linux-master | net/rxrpc/sendmsg.c |