python_code (string, up to 1.8M chars) | repo_name (string, 7 classes) | file_path (string, 5 to 99 chars) |
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
* Copyright (c) 2016 Pablo Neira Ayuso <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
struct nft_ct_helper_obj {
struct nf_conntrack_helper *helper4;
struct nf_conntrack_helper *helper6;
u8 l4proto;
};
#ifdef CONFIG_NF_CONNTRACK_ZONES
static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template);
static unsigned int nft_ct_pcpu_template_refcnt __read_mostly;
static DEFINE_MUTEX(nft_ct_pcpu_mutex);
#endif
static u64 nft_ct_get_eval_counter(const struct nf_conn_counter *c,
enum nft_ct_keys k,
enum ip_conntrack_dir d)
{
if (d < IP_CT_DIR_MAX)
return k == NFT_CT_BYTES ? atomic64_read(&c[d].bytes) :
atomic64_read(&c[d].packets);
return nft_ct_get_eval_counter(c, k, IP_CT_DIR_ORIGINAL) +
nft_ct_get_eval_counter(c, k, IP_CT_DIR_REPLY);
}
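/* Illustrative note (not part of the original source): when a rule does not
 * specify a direction, priv->dir is IP_CT_DIR_MAX and the helper above sums
 * both directions via one level of recursion, e.g.:
 *
 *	bytes = nft_ct_get_eval_counter(c, NFT_CT_BYTES, IP_CT_DIR_MAX);
 *	 == atomic64_read(&c[IP_CT_DIR_ORIGINAL].bytes) +
 *	    atomic64_read(&c[IP_CT_DIR_REPLY].bytes)
 */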
static void nft_ct_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
u32 *dest = &regs->data[priv->dreg];
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
const struct nf_conn_help *help;
const struct nf_conntrack_tuple *tuple;
const struct nf_conntrack_helper *helper;
unsigned int state;
ct = nf_ct_get(pkt->skb, &ctinfo);
switch (priv->key) {
case NFT_CT_STATE:
if (ct)
state = NF_CT_STATE_BIT(ctinfo);
else if (ctinfo == IP_CT_UNTRACKED)
state = NF_CT_STATE_UNTRACKED_BIT;
else
state = NF_CT_STATE_INVALID_BIT;
*dest = state;
return;
default:
break;
}
if (ct == NULL)
goto err;
switch (priv->key) {
case NFT_CT_DIRECTION:
nft_reg_store8(dest, CTINFO2DIR(ctinfo));
return;
case NFT_CT_STATUS:
*dest = ct->status;
return;
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
*dest = READ_ONCE(ct->mark);
return;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
case NFT_CT_SECMARK:
*dest = ct->secmark;
return;
#endif
case NFT_CT_EXPIRATION:
*dest = jiffies_to_msecs(nf_ct_expires(ct));
return;
case NFT_CT_HELPER:
if (ct->master == NULL)
goto err;
help = nfct_help(ct->master);
if (help == NULL)
goto err;
helper = rcu_dereference(help->helper);
if (helper == NULL)
goto err;
strscpy_pad((char *)dest, helper->name, NF_CT_HELPER_NAME_LEN);
return;
#ifdef CONFIG_NF_CONNTRACK_LABELS
case NFT_CT_LABELS: {
struct nf_conn_labels *labels = nf_ct_labels_find(ct);
if (labels)
memcpy(dest, labels->bits, NF_CT_LABELS_MAX_SIZE);
else
memset(dest, 0, NF_CT_LABELS_MAX_SIZE);
return;
}
#endif
case NFT_CT_BYTES:
case NFT_CT_PKTS: {
const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
u64 count = 0;
if (acct)
count = nft_ct_get_eval_counter(acct->counter,
priv->key, priv->dir);
memcpy(dest, &count, sizeof(count));
return;
}
case NFT_CT_AVGPKT: {
const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
u64 avgcnt = 0, bcnt = 0, pcnt = 0;
if (acct) {
pcnt = nft_ct_get_eval_counter(acct->counter,
NFT_CT_PKTS, priv->dir);
bcnt = nft_ct_get_eval_counter(acct->counter,
NFT_CT_BYTES, priv->dir);
if (pcnt != 0)
avgcnt = div64_u64(bcnt, pcnt);
}
memcpy(dest, &avgcnt, sizeof(avgcnt));
return;
}
case NFT_CT_L3PROTOCOL:
nft_reg_store8(dest, nf_ct_l3num(ct));
return;
case NFT_CT_PROTOCOL:
nft_reg_store8(dest, nf_ct_protonum(ct));
return;
#ifdef CONFIG_NF_CONNTRACK_ZONES
case NFT_CT_ZONE: {
const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
u16 zoneid;
if (priv->dir < IP_CT_DIR_MAX)
zoneid = nf_ct_zone_id(zone, priv->dir);
else
zoneid = zone->id;
nft_reg_store16(dest, zoneid);
return;
}
#endif
case NFT_CT_ID:
*dest = nf_ct_get_id(ct);
return;
default:
break;
}
tuple = &ct->tuplehash[priv->dir].tuple;
switch (priv->key) {
case NFT_CT_SRC:
memcpy(dest, tuple->src.u3.all,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
return;
case NFT_CT_DST:
memcpy(dest, tuple->dst.u3.all,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
return;
case NFT_CT_PROTO_SRC:
nft_reg_store16(dest, (__force u16)tuple->src.u.all);
return;
case NFT_CT_PROTO_DST:
nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
return;
case NFT_CT_SRC_IP:
if (nf_ct_l3num(ct) != NFPROTO_IPV4)
goto err;
*dest = (__force __u32)tuple->src.u3.ip;
return;
case NFT_CT_DST_IP:
if (nf_ct_l3num(ct) != NFPROTO_IPV4)
goto err;
*dest = (__force __u32)tuple->dst.u3.ip;
return;
case NFT_CT_SRC_IP6:
if (nf_ct_l3num(ct) != NFPROTO_IPV6)
goto err;
memcpy(dest, tuple->src.u3.ip6, sizeof(struct in6_addr));
return;
case NFT_CT_DST_IP6:
if (nf_ct_l3num(ct) != NFPROTO_IPV6)
goto err;
memcpy(dest, tuple->dst.u3.ip6, sizeof(struct in6_addr));
return;
default:
break;
}
return;
err:
regs->verdict.code = NFT_BREAK;
}
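/* Example (assumption: standard nft userspace syntax): a rule such as
 *	nft add rule inet filter input ct state established,related accept
 * compiles to this expression with key NFT_CT_STATE; the returned bit mask
 * is then tested by a following cmp/bitwise expression. All other keys
 * require a conntrack entry, so the NULL check above makes them evaluate
 * to NFT_BREAK on untracked packets.
 */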
#ifdef CONFIG_NF_CONNTRACK_ZONES
static void nft_ct_set_zone_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nf_conntrack_zone zone = { .dir = NF_CT_DEFAULT_ZONE_DIR };
const struct nft_ct *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
enum ip_conntrack_info ctinfo;
u16 value = nft_reg_load16(&regs->data[priv->sreg]);
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (ct) /* already tracked */
return;
zone.id = value;
switch (priv->dir) {
case IP_CT_DIR_ORIGINAL:
zone.dir = NF_CT_ZONE_DIR_ORIG;
break;
case IP_CT_DIR_REPLY:
zone.dir = NF_CT_ZONE_DIR_REPL;
break;
default:
break;
}
ct = this_cpu_read(nft_ct_pcpu_template);
if (likely(refcount_read(&ct->ct_general.use) == 1)) {
refcount_inc(&ct->ct_general.use);
nf_ct_zone_add(ct, &zone);
} else {
/* previous skb got queued to userspace, allocate temporary
* one until percpu template can be reused.
*/
ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC);
if (!ct) {
regs->verdict.code = NF_DROP;
return;
}
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
}
nf_ct_set(skb, ct, IP_CT_NEW);
}
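/* Sketch of the fast path above (illustrative): the per-cpu template is
 * normally referenced only by this cache, so ct_general.use == 1 and it can
 * be attached to the skb by taking one extra reference. If an earlier skb
 * still pins the template (use > 1), e.g. because it sits in a userspace
 * queue, a throwaway template is allocated for this packet instead.
 */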
#endif
static void nft_ct_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
#if defined(CONFIG_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_SECMARK)
u32 value = regs->data[priv->sreg];
#endif
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL || nf_ct_is_template(ct))
return;
switch (priv->key) {
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
if (READ_ONCE(ct->mark) != value) {
WRITE_ONCE(ct->mark, value);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
case NFT_CT_SECMARK:
if (ct->secmark != value) {
ct->secmark = value;
nf_conntrack_event_cache(IPCT_SECMARK, ct);
}
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
case NFT_CT_LABELS:
nf_connlabels_replace(ct,
&regs->data[priv->sreg],
&regs->data[priv->sreg],
NF_CT_LABELS_MAX_SIZE / sizeof(u32));
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
case NFT_CT_EVENTMASK: {
struct nf_conntrack_ecache *e = nf_ct_ecache_find(ct);
u32 ctmask = regs->data[priv->sreg];
if (e) {
if (e->ctmask != ctmask)
e->ctmask = ctmask;
break;
}
if (ctmask && !nf_ct_is_confirmed(ct))
nf_ct_ecache_ext_add(ct, ctmask, 0, GFP_ATOMIC);
break;
}
#endif
default:
break;
}
}
static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = {
[NFTA_CT_DREG] = { .type = NLA_U32 },
[NFTA_CT_KEY] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_CT_DIRECTION] = { .type = NLA_U8 },
[NFTA_CT_SREG] = { .type = NLA_U32 },
};
#ifdef CONFIG_NF_CONNTRACK_ZONES
static void nft_ct_tmpl_put_pcpu(void)
{
struct nf_conn *ct;
int cpu;
for_each_possible_cpu(cpu) {
ct = per_cpu(nft_ct_pcpu_template, cpu);
if (!ct)
break;
nf_ct_put(ct);
per_cpu(nft_ct_pcpu_template, cpu) = NULL;
}
}
static bool nft_ct_tmpl_alloc_pcpu(void)
{
struct nf_conntrack_zone zone = { .id = 0 };
struct nf_conn *tmp;
int cpu;
if (nft_ct_pcpu_template_refcnt)
return true;
for_each_possible_cpu(cpu) {
tmp = nf_ct_tmpl_alloc(&init_net, &zone, GFP_KERNEL);
if (!tmp) {
nft_ct_tmpl_put_pcpu();
return false;
}
__set_bit(IPS_CONFIRMED_BIT, &tmp->status);
per_cpu(nft_ct_pcpu_template, cpu) = tmp;
}
return true;
}
#endif
static int nft_ct_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_ct *priv = nft_expr_priv(expr);
unsigned int len;
int err;
priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
priv->dir = IP_CT_DIR_MAX;
switch (priv->key) {
case NFT_CT_DIRECTION:
if (tb[NFTA_CT_DIRECTION] != NULL)
return -EINVAL;
len = sizeof(u8);
break;
case NFT_CT_STATE:
case NFT_CT_STATUS:
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
case NFT_CT_SECMARK:
#endif
case NFT_CT_EXPIRATION:
if (tb[NFTA_CT_DIRECTION] != NULL)
return -EINVAL;
len = sizeof(u32);
break;
#ifdef CONFIG_NF_CONNTRACK_LABELS
case NFT_CT_LABELS:
if (tb[NFTA_CT_DIRECTION] != NULL)
return -EINVAL;
len = NF_CT_LABELS_MAX_SIZE;
break;
#endif
case NFT_CT_HELPER:
if (tb[NFTA_CT_DIRECTION] != NULL)
return -EINVAL;
len = NF_CT_HELPER_NAME_LEN;
break;
case NFT_CT_L3PROTOCOL:
case NFT_CT_PROTOCOL:
/* For compatibility, do not report error if NFTA_CT_DIRECTION
* attribute is specified.
*/
len = sizeof(u8);
break;
case NFT_CT_SRC:
case NFT_CT_DST:
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
switch (ctx->family) {
case NFPROTO_IPV4:
len = sizeof_field(struct nf_conntrack_tuple,
src.u3.ip);
break;
case NFPROTO_IPV6:
case NFPROTO_INET:
len = sizeof_field(struct nf_conntrack_tuple,
src.u3.ip6);
break;
default:
return -EAFNOSUPPORT;
}
break;
case NFT_CT_SRC_IP:
case NFT_CT_DST_IP:
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
len = sizeof_field(struct nf_conntrack_tuple, src.u3.ip);
break;
case NFT_CT_SRC_IP6:
case NFT_CT_DST_IP6:
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
len = sizeof_field(struct nf_conntrack_tuple, src.u3.ip6);
break;
case NFT_CT_PROTO_SRC:
case NFT_CT_PROTO_DST:
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
len = sizeof_field(struct nf_conntrack_tuple, src.u.all);
break;
case NFT_CT_BYTES:
case NFT_CT_PKTS:
case NFT_CT_AVGPKT:
len = sizeof(u64);
break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
case NFT_CT_ZONE:
len = sizeof(u16);
break;
#endif
case NFT_CT_ID:
len = sizeof(u32);
break;
default:
return -EOPNOTSUPP;
}
if (tb[NFTA_CT_DIRECTION] != NULL) {
priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
switch (priv->dir) {
case IP_CT_DIR_ORIGINAL:
case IP_CT_DIR_REPLY:
break;
default:
return -EINVAL;
}
}
priv->len = len;
err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,
NFT_DATA_VALUE, len);
if (err < 0)
return err;
err = nf_ct_netns_get(ctx->net, ctx->family);
if (err < 0)
return err;
if (priv->key == NFT_CT_BYTES ||
priv->key == NFT_CT_PKTS ||
priv->key == NFT_CT_AVGPKT)
nf_ct_set_acct(ctx->net, true);
return 0;
}
static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
{
switch (priv->key) {
#ifdef CONFIG_NF_CONNTRACK_LABELS
case NFT_CT_LABELS:
nf_connlabels_put(ctx->net);
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
case NFT_CT_ZONE:
mutex_lock(&nft_ct_pcpu_mutex);
if (--nft_ct_pcpu_template_refcnt == 0)
nft_ct_tmpl_put_pcpu();
mutex_unlock(&nft_ct_pcpu_mutex);
break;
#endif
default:
break;
}
}
static int nft_ct_set_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_ct *priv = nft_expr_priv(expr);
unsigned int len;
int err;
priv->dir = IP_CT_DIR_MAX;
priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
switch (priv->key) {
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
if (tb[NFTA_CT_DIRECTION])
return -EINVAL;
len = sizeof_field(struct nf_conn, mark);
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
case NFT_CT_LABELS:
if (tb[NFTA_CT_DIRECTION])
return -EINVAL;
len = NF_CT_LABELS_MAX_SIZE;
err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1);
if (err)
return err;
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
case NFT_CT_ZONE:
mutex_lock(&nft_ct_pcpu_mutex);
if (!nft_ct_tmpl_alloc_pcpu()) {
mutex_unlock(&nft_ct_pcpu_mutex);
return -ENOMEM;
}
nft_ct_pcpu_template_refcnt++;
mutex_unlock(&nft_ct_pcpu_mutex);
len = sizeof(u16);
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
case NFT_CT_EVENTMASK:
if (tb[NFTA_CT_DIRECTION])
return -EINVAL;
len = sizeof(u32);
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
case NFT_CT_SECMARK:
if (tb[NFTA_CT_DIRECTION])
return -EINVAL;
len = sizeof(u32);
break;
#endif
default:
return -EOPNOTSUPP;
}
if (tb[NFTA_CT_DIRECTION]) {
priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
switch (priv->dir) {
case IP_CT_DIR_ORIGINAL:
case IP_CT_DIR_REPLY:
break;
default:
err = -EINVAL;
goto err1;
}
}
priv->len = len;
err = nft_parse_register_load(tb[NFTA_CT_SREG], &priv->sreg, len);
if (err < 0)
goto err1;
err = nf_ct_netns_get(ctx->net, ctx->family);
if (err < 0)
goto err1;
return 0;
err1:
__nft_ct_set_destroy(ctx, priv);
return err;
}
static void nft_ct_get_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
nf_ct_netns_put(ctx->net, ctx->family);
}
static void nft_ct_set_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_ct *priv = nft_expr_priv(expr);
__nft_ct_set_destroy(ctx, priv);
nf_ct_netns_put(ctx->net, ctx->family);
}
static int nft_ct_get_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_ct *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_CT_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
goto nla_put_failure;
switch (priv->key) {
case NFT_CT_SRC:
case NFT_CT_DST:
case NFT_CT_SRC_IP:
case NFT_CT_DST_IP:
case NFT_CT_SRC_IP6:
case NFT_CT_DST_IP6:
case NFT_CT_PROTO_SRC:
case NFT_CT_PROTO_DST:
if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
goto nla_put_failure;
break;
case NFT_CT_BYTES:
case NFT_CT_PKTS:
case NFT_CT_AVGPKT:
case NFT_CT_ZONE:
if (priv->dir < IP_CT_DIR_MAX &&
nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
goto nla_put_failure;
break;
default:
break;
}
return 0;
nla_put_failure:
return -1;
}
static bool nft_ct_get_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_ct *priv = nft_expr_priv(expr);
const struct nft_ct *ct;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
ct = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->key != ct->key) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return nft_expr_reduce_bitwise(track, expr);
}
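/* Illustrative note: the register tracking above lets the core elide this
 * expression when a previous ct get already loaded the same key into the
 * same destination register, e.g. two consecutive "ct mark" loads in one
 * rule reduce to a single evaluation.
 */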
static int nft_ct_set_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_ct *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_CT_SREG, priv->sreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
goto nla_put_failure;
switch (priv->key) {
case NFT_CT_ZONE:
if (priv->dir < IP_CT_DIR_MAX &&
nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
goto nla_put_failure;
break;
default:
break;
}
return 0;
nla_put_failure:
return -1;
}
static struct nft_expr_type nft_ct_type;
static const struct nft_expr_ops nft_ct_get_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
.eval = nft_ct_get_eval,
.init = nft_ct_get_init,
.destroy = nft_ct_get_destroy,
.dump = nft_ct_get_dump,
.reduce = nft_ct_get_reduce,
};
static bool nft_ct_set_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
int i;
for (i = 0; i < NFT_REG32_NUM; i++) {
if (!track->regs[i].selector)
continue;
if (track->regs[i].selector->ops != &nft_ct_get_ops)
continue;
__nft_reg_track_cancel(track, i);
}
return false;
}
#ifdef CONFIG_RETPOLINE
static const struct nft_expr_ops nft_ct_get_fast_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
.eval = nft_ct_get_fast_eval,
.init = nft_ct_get_init,
.destroy = nft_ct_get_destroy,
.dump = nft_ct_get_dump,
.reduce = nft_ct_set_reduce,
};
#endif
static const struct nft_expr_ops nft_ct_set_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
.eval = nft_ct_set_eval,
.init = nft_ct_set_init,
.destroy = nft_ct_set_destroy,
.dump = nft_ct_set_dump,
.reduce = nft_ct_set_reduce,
};
#ifdef CONFIG_NF_CONNTRACK_ZONES
static const struct nft_expr_ops nft_ct_set_zone_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
.eval = nft_ct_set_zone_eval,
.init = nft_ct_set_init,
.destroy = nft_ct_set_destroy,
.dump = nft_ct_set_dump,
.reduce = nft_ct_set_reduce,
};
#endif
static const struct nft_expr_ops *
nft_ct_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_CT_KEY] == NULL)
return ERR_PTR(-EINVAL);
if (tb[NFTA_CT_DREG] && tb[NFTA_CT_SREG])
return ERR_PTR(-EINVAL);
if (tb[NFTA_CT_DREG]) {
#ifdef CONFIG_RETPOLINE
u32 k = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
switch (k) {
case NFT_CT_STATE:
case NFT_CT_DIRECTION:
case NFT_CT_STATUS:
case NFT_CT_MARK:
case NFT_CT_SECMARK:
return &nft_ct_get_fast_ops;
}
#endif
return &nft_ct_get_ops;
}
if (tb[NFTA_CT_SREG]) {
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (nla_get_be32(tb[NFTA_CT_KEY]) == htonl(NFT_CT_ZONE))
return &nft_ct_set_zone_ops;
#endif
return &nft_ct_set_ops;
}
return ERR_PTR(-EINVAL);
}
static struct nft_expr_type nft_ct_type __read_mostly = {
.name = "ct",
.select_ops = nft_ct_select_ops,
.policy = nft_ct_policy,
.maxattr = NFTA_CT_MAX,
.owner = THIS_MODULE,
};
static void nft_notrack_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct sk_buff *skb = pkt->skb;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(pkt->skb, &ctinfo);
/* Previously seen (loopback or untracked)? Ignore. */
if (ct || ctinfo == IP_CT_UNTRACKED)
return;
nf_ct_set(skb, ct, IP_CT_UNTRACKED);
}
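/* Example (assumption: standard nft userspace syntax):
 *	nft add rule inet raw prerouting udp dport 53 notrack
 * marks the packet as untracked here, so conntrack ignores the flow.
 */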
static struct nft_expr_type nft_notrack_type;
static const struct nft_expr_ops nft_notrack_ops = {
.type = &nft_notrack_type,
.size = NFT_EXPR_SIZE(0),
.eval = nft_notrack_eval,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_notrack_type __read_mostly = {
.name = "notrack",
.ops = &nft_notrack_ops,
.owner = THIS_MODULE,
};
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
static int
nft_ct_timeout_parse_policy(void *timeouts,
const struct nf_conntrack_l4proto *l4proto,
struct net *net, const struct nlattr *attr)
{
struct nlattr **tb;
int ret = 0;
tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
GFP_KERNEL);
if (!tb)
return -ENOMEM;
ret = nla_parse_nested_deprecated(tb,
l4proto->ctnl_timeout.nlattr_max,
attr,
l4proto->ctnl_timeout.nla_policy,
NULL);
if (ret < 0)
goto err;
ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts);
err:
kfree(tb);
return ret;
}
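/* Example (assumption: illustrative nft userspace syntax): for
 * l4proto == IPPROTO_TCP the nested NFTA_CT_TIMEOUT_DATA attribute carries
 * CTA_TIMEOUT_TCP_* values, so an object like
 *	ct timeout t1 { protocol tcp; policy = { established : 7200 }; }
 * is translated into the per-protocol timeout array by the
 * nlattr_to_obj() callback above.
 */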
struct nft_ct_timeout_obj {
struct nf_ct_timeout *timeout;
u8 l4proto;
};
static void nft_ct_timeout_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
struct nf_conn_timeout *timeout;
const unsigned int *values;
if (priv->l4proto != pkt->tprot)
return;
if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))
return;
timeout = nf_ct_timeout_find(ct);
if (!timeout) {
timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC);
if (!timeout) {
regs->verdict.code = NF_DROP;
return;
}
}
rcu_assign_pointer(timeout->timeout, priv->timeout);
/* adjust the timeout as per 'new' state. ct is unconfirmed,
* so the current timestamp must not be added.
*/
values = nf_ct_timeout_data(timeout);
if (values)
nf_ct_refresh(ct, pkt->skb, values[0]);
}
static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
const struct nf_conntrack_l4proto *l4proto;
struct nf_ct_timeout *timeout;
int l3num = ctx->family;
__u8 l4num;
int ret;
if (!tb[NFTA_CT_TIMEOUT_L4PROTO] ||
!tb[NFTA_CT_TIMEOUT_DATA])
return -EINVAL;
if (tb[NFTA_CT_TIMEOUT_L3PROTO])
l3num = ntohs(nla_get_be16(tb[NFTA_CT_TIMEOUT_L3PROTO]));
l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]);
priv->l4proto = l4num;
l4proto = nf_ct_l4proto_find(l4num);
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
goto err_proto_put;
}
timeout = kzalloc(sizeof(struct nf_ct_timeout) +
l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
if (timeout == NULL) {
ret = -ENOMEM;
goto err_proto_put;
}
ret = nft_ct_timeout_parse_policy(&timeout->data, l4proto, ctx->net,
tb[NFTA_CT_TIMEOUT_DATA]);
if (ret < 0)
goto err_free_timeout;
timeout->l3num = l3num;
timeout->l4proto = l4proto;
ret = nf_ct_netns_get(ctx->net, ctx->family);
if (ret < 0)
goto err_free_timeout;
priv->timeout = timeout;
return 0;
err_free_timeout:
kfree(timeout);
err_proto_put:
return ret;
}
static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
struct nf_ct_timeout *timeout = priv->timeout;
nf_ct_untimeout(ctx->net, timeout);
nf_ct_netns_put(ctx->net, ctx->family);
kfree(priv->timeout);
}
static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
const struct nf_ct_timeout *timeout = priv->timeout;
struct nlattr *nest_params;
int ret;
if (nla_put_u8(skb, NFTA_CT_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
nla_put_be16(skb, NFTA_CT_TIMEOUT_L3PROTO, htons(timeout->l3num)))
return -1;
nest_params = nla_nest_start(skb, NFTA_CT_TIMEOUT_DATA);
if (!nest_params)
return -1;
ret = timeout->l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
if (ret < 0)
return -1;
nla_nest_end(skb, nest_params);
return 0;
}
static const struct nla_policy nft_ct_timeout_policy[NFTA_CT_TIMEOUT_MAX + 1] = {
[NFTA_CT_TIMEOUT_L3PROTO] = {.type = NLA_U16 },
[NFTA_CT_TIMEOUT_L4PROTO] = {.type = NLA_U8 },
[NFTA_CT_TIMEOUT_DATA] = {.type = NLA_NESTED },
};
static struct nft_object_type nft_ct_timeout_obj_type;
static const struct nft_object_ops nft_ct_timeout_obj_ops = {
.type = &nft_ct_timeout_obj_type,
.size = sizeof(struct nft_ct_timeout_obj),
.eval = nft_ct_timeout_obj_eval,
.init = nft_ct_timeout_obj_init,
.destroy = nft_ct_timeout_obj_destroy,
.dump = nft_ct_timeout_obj_dump,
};
static struct nft_object_type nft_ct_timeout_obj_type __read_mostly = {
.type = NFT_OBJECT_CT_TIMEOUT,
.ops = &nft_ct_timeout_obj_ops,
.maxattr = NFTA_CT_TIMEOUT_MAX,
.policy = nft_ct_timeout_policy,
.owner = THIS_MODULE,
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_ct_helper_obj *priv = nft_obj_data(obj);
struct nf_conntrack_helper *help4, *help6;
char name[NF_CT_HELPER_NAME_LEN];
int family = ctx->family;
int err;
if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO])
return -EINVAL;
priv->l4proto = nla_get_u8(tb[NFTA_CT_HELPER_L4PROTO]);
if (!priv->l4proto)
return -ENOENT;
nla_strscpy(name, tb[NFTA_CT_HELPER_NAME], sizeof(name));
if (tb[NFTA_CT_HELPER_L3PROTO])
family = ntohs(nla_get_be16(tb[NFTA_CT_HELPER_L3PROTO]));
help4 = NULL;
help6 = NULL;
switch (family) {
case NFPROTO_IPV4:
if (ctx->family == NFPROTO_IPV6)
return -EINVAL;
help4 = nf_conntrack_helper_try_module_get(name, family,
priv->l4proto);
break;
case NFPROTO_IPV6:
if (ctx->family == NFPROTO_IPV4)
return -EINVAL;
help6 = nf_conntrack_helper_try_module_get(name, family,
priv->l4proto);
break;
case NFPROTO_NETDEV:
case NFPROTO_BRIDGE:
case NFPROTO_INET:
help4 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV4,
priv->l4proto);
help6 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV6,
priv->l4proto);
break;
default:
return -EAFNOSUPPORT;
}
/* && is intentional; only error if INET found neither ipv4 nor ipv6 */
if (!help4 && !help6)
return -ENOENT;
priv->helper4 = help4;
priv->helper6 = help6;
err = nf_ct_netns_get(ctx->net, ctx->family);
if (err < 0)
goto err_put_helper;
return 0;
err_put_helper:
if (priv->helper4)
nf_conntrack_helper_put(priv->helper4);
if (priv->helper6)
nf_conntrack_helper_put(priv->helper6);
return err;
}
static void nft_ct_helper_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_ct_helper_obj *priv = nft_obj_data(obj);
if (priv->helper4)
nf_conntrack_helper_put(priv->helper4);
if (priv->helper6)
nf_conntrack_helper_put(priv->helper6);
nf_ct_netns_put(ctx->net, ctx->family);
}
static void nft_ct_helper_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
struct nf_conntrack_helper *to_assign = NULL;
struct nf_conn_help *help;
if (!ct ||
nf_ct_is_confirmed(ct) ||
nf_ct_is_template(ct) ||
priv->l4proto != nf_ct_protonum(ct))
return;
switch (nf_ct_l3num(ct)) {
case NFPROTO_IPV4:
to_assign = priv->helper4;
break;
case NFPROTO_IPV6:
to_assign = priv->helper6;
break;
default:
WARN_ON_ONCE(1);
return;
}
if (!to_assign)
return;
if (test_bit(IPS_HELPER_BIT, &ct->status))
return;
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help) {
rcu_assign_pointer(help->helper, to_assign);
set_bit(IPS_HELPER_BIT, &ct->status);
}
}
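/* Example (assumption: standard nft userspace syntax):
 *	nft add ct helper inet filter ftp-std { type "ftp" protocol tcp; }
 *	nft add rule inet filter prerouting tcp dport 21 ct helper set "ftp-std"
 * The eval above then binds the ipv4 or ipv6 helper resolved at init time
 * to the still-unconfirmed conntrack entry of the flow.
 */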
static int nft_ct_helper_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
const struct nf_conntrack_helper *helper;
u16 family;
if (priv->helper4 && priv->helper6) {
family = NFPROTO_INET;
helper = priv->helper4;
} else if (priv->helper6) {
family = NFPROTO_IPV6;
helper = priv->helper6;
} else {
family = NFPROTO_IPV4;
helper = priv->helper4;
}
if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
return -1;
if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
return -1;
if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
return -1;
return 0;
}
static const struct nla_policy nft_ct_helper_policy[NFTA_CT_HELPER_MAX + 1] = {
[NFTA_CT_HELPER_NAME] = { .type = NLA_STRING,
.len = NF_CT_HELPER_NAME_LEN - 1 },
[NFTA_CT_HELPER_L3PROTO] = { .type = NLA_U16 },
[NFTA_CT_HELPER_L4PROTO] = { .type = NLA_U8 },
};
static struct nft_object_type nft_ct_helper_obj_type;
static const struct nft_object_ops nft_ct_helper_obj_ops = {
.type = &nft_ct_helper_obj_type,
.size = sizeof(struct nft_ct_helper_obj),
.eval = nft_ct_helper_obj_eval,
.init = nft_ct_helper_obj_init,
.destroy = nft_ct_helper_obj_destroy,
.dump = nft_ct_helper_obj_dump,
};
static struct nft_object_type nft_ct_helper_obj_type __read_mostly = {
.type = NFT_OBJECT_CT_HELPER,
.ops = &nft_ct_helper_obj_ops,
.maxattr = NFTA_CT_HELPER_MAX,
.policy = nft_ct_helper_policy,
.owner = THIS_MODULE,
};
struct nft_ct_expect_obj {
u16 l3num;
__be16 dport;
u8 l4proto;
u8 size;
u32 timeout;
};
static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_ct_expect_obj *priv = nft_obj_data(obj);
if (!tb[NFTA_CT_EXPECT_L4PROTO] ||
!tb[NFTA_CT_EXPECT_DPORT] ||
!tb[NFTA_CT_EXPECT_TIMEOUT] ||
!tb[NFTA_CT_EXPECT_SIZE])
return -EINVAL;
priv->l3num = ctx->family;
if (tb[NFTA_CT_EXPECT_L3PROTO])
priv->l3num = ntohs(nla_get_be16(tb[NFTA_CT_EXPECT_L3PROTO]));
priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
priv->dport = nla_get_be16(tb[NFTA_CT_EXPECT_DPORT]);
priv->timeout = nla_get_u32(tb[NFTA_CT_EXPECT_TIMEOUT]);
priv->size = nla_get_u8(tb[NFTA_CT_EXPECT_SIZE]);
return nf_ct_netns_get(ctx->net, ctx->family);
}
static void nft_ct_expect_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
nf_ct_netns_put(ctx->net, ctx->family);
}
static int nft_ct_expect_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
const struct nft_ct_expect_obj *priv = nft_obj_data(obj);
if (nla_put_be16(skb, NFTA_CT_EXPECT_L3PROTO, htons(priv->l3num)) ||
nla_put_u8(skb, NFTA_CT_EXPECT_L4PROTO, priv->l4proto) ||
nla_put_be16(skb, NFTA_CT_EXPECT_DPORT, priv->dport) ||
nla_put_u32(skb, NFTA_CT_EXPECT_TIMEOUT, priv->timeout) ||
nla_put_u8(skb, NFTA_CT_EXPECT_SIZE, priv->size))
return -1;
return 0;
}
static void nft_ct_expect_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct_expect_obj *priv = nft_obj_data(obj);
struct nf_conntrack_expect *exp;
enum ip_conntrack_info ctinfo;
struct nf_conn_help *help;
enum ip_conntrack_dir dir;
u16 l3num = priv->l3num;
struct nf_conn *ct;
ct = nf_ct_get(pkt->skb, &ctinfo);
if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
regs->verdict.code = NFT_BREAK;
return;
}
dir = CTINFO2DIR(ctinfo);
help = nfct_help(ct);
if (!help)
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (!help) {
regs->verdict.code = NF_DROP;
return;
}
if (help->expecting[NF_CT_EXPECT_CLASS_DEFAULT] >= priv->size) {
regs->verdict.code = NFT_BREAK;
return;
}
if (l3num == NFPROTO_INET)
l3num = nf_ct_l3num(ct);
exp = nf_ct_expect_alloc(ct);
if (exp == NULL) {
regs->verdict.code = NF_DROP;
return;
}
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, l3num,
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
priv->l4proto, NULL, &priv->dport);
exp->timeout.expires = jiffies + priv->timeout * HZ;
if (nf_ct_expect_related(exp, 0) != 0)
regs->verdict.code = NF_DROP;
}
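/* Example (assumption: illustrative nft userspace syntax):
 *	nft add ct expectation inet filter e1 \
 *		{ protocol udp; dport 9; timeout 2m; size 8; }
 * Evaluation registers an expectation towards priv->dport for the reply
 * direction of the current flow, as long as fewer than 'size' expectations
 * exist for it.
 */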
static const struct nla_policy nft_ct_expect_policy[NFTA_CT_EXPECT_MAX + 1] = {
[NFTA_CT_EXPECT_L3PROTO] = { .type = NLA_U16 },
[NFTA_CT_EXPECT_L4PROTO] = { .type = NLA_U8 },
[NFTA_CT_EXPECT_DPORT] = { .type = NLA_U16 },
[NFTA_CT_EXPECT_TIMEOUT] = { .type = NLA_U32 },
[NFTA_CT_EXPECT_SIZE] = { .type = NLA_U8 },
};
static struct nft_object_type nft_ct_expect_obj_type;
static const struct nft_object_ops nft_ct_expect_obj_ops = {
.type = &nft_ct_expect_obj_type,
.size = sizeof(struct nft_ct_expect_obj),
.eval = nft_ct_expect_obj_eval,
.init = nft_ct_expect_obj_init,
.destroy = nft_ct_expect_obj_destroy,
.dump = nft_ct_expect_obj_dump,
};
static struct nft_object_type nft_ct_expect_obj_type __read_mostly = {
.type = NFT_OBJECT_CT_EXPECT,
.ops = &nft_ct_expect_obj_ops,
.maxattr = NFTA_CT_EXPECT_MAX,
.policy = nft_ct_expect_policy,
.owner = THIS_MODULE,
};
static int __init nft_ct_module_init(void)
{
int err;
BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE > NFT_REG_SIZE);
err = nft_register_expr(&nft_ct_type);
if (err < 0)
return err;
err = nft_register_expr(&nft_notrack_type);
if (err < 0)
goto err1;
err = nft_register_obj(&nft_ct_helper_obj_type);
if (err < 0)
goto err2;
err = nft_register_obj(&nft_ct_expect_obj_type);
if (err < 0)
goto err3;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
err = nft_register_obj(&nft_ct_timeout_obj_type);
if (err < 0)
goto err4;
#endif
return 0;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
err4:
nft_unregister_obj(&nft_ct_expect_obj_type);
#endif
err3:
nft_unregister_obj(&nft_ct_helper_obj_type);
err2:
nft_unregister_expr(&nft_notrack_type);
err1:
nft_unregister_expr(&nft_ct_type);
return err;
}
static void __exit nft_ct_module_exit(void)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
nft_unregister_obj(&nft_ct_timeout_obj_type);
#endif
nft_unregister_obj(&nft_ct_expect_obj_type);
nft_unregister_obj(&nft_ct_helper_obj_type);
nft_unregister_expr(&nft_notrack_type);
nft_unregister_expr(&nft_ct_type);
}
module_init(nft_ct_module_init);
module_exit(nft_ct_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS_NFT_EXPR("ct");
MODULE_ALIAS_NFT_EXPR("notrack");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT);
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_EXPECT);
MODULE_DESCRIPTION("Netfilter nf_tables conntrack module");
| linux-master | net/netfilter/nft_ct.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
struct nft_connlimit {
struct nf_conncount_list *list;
u32 limit;
bool invert;
};
static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt,
const struct nft_set_ext *ext)
{
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
const struct nf_conntrack_tuple *tuple_ptr;
struct nf_conntrack_tuple tuple;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int count;
tuple_ptr = &tuple;
ct = nf_ct_get(pkt->skb, &ctinfo);
if (ct != NULL) {
tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
zone = nf_ct_zone(ct);
} else if (!nf_ct_get_tuplepr(pkt->skb, skb_network_offset(pkt->skb),
nft_pf(pkt), nft_net(pkt), &tuple)) {
regs->verdict.code = NF_DROP;
return;
}
if (nf_conncount_add(nft_net(pkt), priv->list, tuple_ptr, zone)) {
regs->verdict.code = NF_DROP;
return;
}
count = priv->list->count;
if ((count > priv->limit) ^ priv->invert) {
regs->verdict.code = NFT_BREAK;
return;
}
}
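/* Worked example of the final check (illustrative values): with limit = 10
 * and invert == false, count = 11 gives (11 > 10) ^ false == true, so the
 * verdict is NFT_BREAK and the rest of the rule is skipped; count = 10 lets
 * rule evaluation continue. With invert == true the two outcomes swap.
 */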
static int nft_connlimit_do_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_connlimit *priv)
{
bool invert = false;
u32 flags, limit;
int err;
if (!tb[NFTA_CONNLIMIT_COUNT])
return -EINVAL;
limit = ntohl(nla_get_be32(tb[NFTA_CONNLIMIT_COUNT]));
if (tb[NFTA_CONNLIMIT_FLAGS]) {
flags = ntohl(nla_get_be32(tb[NFTA_CONNLIMIT_FLAGS]));
if (flags & ~NFT_CONNLIMIT_F_INV)
return -EOPNOTSUPP;
if (flags & NFT_CONNLIMIT_F_INV)
invert = true;
}
priv->list = kmalloc(sizeof(*priv->list), GFP_KERNEL_ACCOUNT);
if (!priv->list)
return -ENOMEM;
nf_conncount_list_init(priv->list);
priv->limit = limit;
priv->invert = invert;
err = nf_ct_netns_get(ctx->net, ctx->family);
if (err < 0)
goto err_netns;
return 0;
err_netns:
kfree(priv->list);
return err;
}
static void nft_connlimit_do_destroy(const struct nft_ctx *ctx,
struct nft_connlimit *priv)
{
nf_ct_netns_put(ctx->net, ctx->family);
nf_conncount_cache_free(priv->list);
kfree(priv->list);
}
static int nft_connlimit_do_dump(struct sk_buff *skb,
struct nft_connlimit *priv)
{
if (nla_put_be32(skb, NFTA_CONNLIMIT_COUNT, htonl(priv->limit)))
goto nla_put_failure;
if (priv->invert &&
nla_put_be32(skb, NFTA_CONNLIMIT_FLAGS, htonl(NFT_CONNLIMIT_F_INV)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static inline void nft_connlimit_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_connlimit *priv = nft_obj_data(obj);
nft_connlimit_do_eval(priv, regs, pkt, NULL);
}
static int nft_connlimit_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_connlimit *priv = nft_obj_data(obj);
return nft_connlimit_do_init(ctx, tb, priv);
}
static void nft_connlimit_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_connlimit *priv = nft_obj_data(obj);
nft_connlimit_do_destroy(ctx, priv);
}
static int nft_connlimit_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
struct nft_connlimit *priv = nft_obj_data(obj);
return nft_connlimit_do_dump(skb, priv);
}
static const struct nla_policy nft_connlimit_policy[NFTA_CONNLIMIT_MAX + 1] = {
[NFTA_CONNLIMIT_COUNT] = { .type = NLA_U32 },
[NFTA_CONNLIMIT_FLAGS] = { .type = NLA_U32 },
};
static struct nft_object_type nft_connlimit_obj_type;
static const struct nft_object_ops nft_connlimit_obj_ops = {
.type = &nft_connlimit_obj_type,
.size = sizeof(struct nft_connlimit),
.eval = nft_connlimit_obj_eval,
.init = nft_connlimit_obj_init,
.destroy = nft_connlimit_obj_destroy,
.dump = nft_connlimit_obj_dump,
};
static struct nft_object_type nft_connlimit_obj_type __read_mostly = {
.type = NFT_OBJECT_CONNLIMIT,
.ops = &nft_connlimit_obj_ops,
.maxattr = NFTA_CONNLIMIT_MAX,
.policy = nft_connlimit_policy,
.owner = THIS_MODULE,
};
static void nft_connlimit_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_connlimit *priv = nft_expr_priv(expr);
nft_connlimit_do_eval(priv, regs, pkt, NULL);
}
static int nft_connlimit_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_connlimit *priv = nft_expr_priv(expr);
return nft_connlimit_do_dump(skb, priv);
}
static int nft_connlimit_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_connlimit *priv = nft_expr_priv(expr);
return nft_connlimit_do_init(ctx, tb, priv);
}
static void nft_connlimit_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_connlimit *priv = nft_expr_priv(expr);
nft_connlimit_do_destroy(ctx, priv);
}
static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src)
{
struct nft_connlimit *priv_dst = nft_expr_priv(dst);
struct nft_connlimit *priv_src = nft_expr_priv(src);
priv_dst->list = kmalloc(sizeof(*priv_dst->list), GFP_ATOMIC);
if (!priv_dst->list)
return -ENOMEM;
nf_conncount_list_init(priv_dst->list);
priv_dst->limit = priv_src->limit;
priv_dst->invert = priv_src->invert;
return 0;
}
static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_connlimit *priv = nft_expr_priv(expr);
nf_conncount_cache_free(priv->list);
kfree(priv->list);
}
static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
{
struct nft_connlimit *priv = nft_expr_priv(expr);
bool ret;
local_bh_disable();
ret = nf_conncount_gc_list(net, priv->list);
local_bh_enable();
return ret;
}
static struct nft_expr_type nft_connlimit_type;
static const struct nft_expr_ops nft_connlimit_ops = {
.type = &nft_connlimit_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_connlimit)),
.eval = nft_connlimit_eval,
.init = nft_connlimit_init,
.destroy = nft_connlimit_destroy,
.clone = nft_connlimit_clone,
.destroy_clone = nft_connlimit_destroy_clone,
.dump = nft_connlimit_dump,
.gc = nft_connlimit_gc,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_connlimit_type __read_mostly = {
.name = "connlimit",
.ops = &nft_connlimit_ops,
.policy = nft_connlimit_policy,
.maxattr = NFTA_CONNLIMIT_MAX,
.flags = NFT_EXPR_STATEFUL | NFT_EXPR_GC,
.owner = THIS_MODULE,
};
static int __init nft_connlimit_module_init(void)
{
int err;
err = nft_register_obj(&nft_connlimit_obj_type);
if (err < 0)
return err;
err = nft_register_expr(&nft_connlimit_type);
if (err < 0)
goto err1;
return 0;
err1:
nft_unregister_obj(&nft_connlimit_obj_type);
return err;
}
static void __exit nft_connlimit_module_exit(void)
{
nft_unregister_expr(&nft_connlimit_type);
nft_unregister_obj(&nft_connlimit_obj_type);
}
module_init(nft_connlimit_module_init);
module_exit(nft_connlimit_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso");
MODULE_ALIAS_NFT_EXPR("connlimit");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CONNLIMIT);
MODULE_DESCRIPTION("nftables connlimit rule support");
| linux-master | net/netfilter/nft_connlimit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_dup_netdev.h>
#define NF_RECURSION_LIMIT 2
static DEFINE_PER_CPU(u8, nf_dup_skb_recursion);
static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev,
enum nf_dev_hooks hook)
{
if (__this_cpu_read(nf_dup_skb_recursion) > NF_RECURSION_LIMIT)
goto err;
if (hook == NF_NETDEV_INGRESS && skb_mac_header_was_set(skb)) {
if (skb_cow_head(skb, skb->mac_len))
goto err;
skb_push(skb, skb->mac_len);
}
skb->dev = dev;
skb_clear_tstamp(skb);
__this_cpu_inc(nf_dup_skb_recursion);
dev_queue_xmit(skb);
__this_cpu_dec(nf_dup_skb_recursion);
return;
err:
kfree_skb(skb);
}
void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif)
{
struct net_device *dev;
dev = dev_get_by_index_rcu(nft_net(pkt), oif);
if (!dev) {
kfree_skb(pkt->skb);
return;
}
nf_do_netdev_egress(pkt->skb, dev, nft_hook(pkt));
}
EXPORT_SYMBOL_GPL(nf_fwd_netdev_egress);
void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif)
{
struct net_device *dev;
struct sk_buff *skb;
dev = dev_get_by_index_rcu(nft_net(pkt), oif);
if (dev == NULL)
return;
skb = skb_clone(pkt->skb, GFP_ATOMIC);
if (skb)
nf_do_netdev_egress(skb, dev, nft_hook(pkt));
}
EXPORT_SYMBOL_GPL(nf_dup_netdev_egress);
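/* Example (assumption: standard nft userspace syntax): these helpers back
 * the netdev-family dup/fwd statements, e.g.
 *	nft add rule netdev filter ingress dup to "eth1"
 * The per-cpu nf_dup_skb_recursion counter above bounds nested duplication
 * when a cloned skb re-enters a hook that duplicates it again.
 */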
int nft_fwd_dup_netdev_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
enum flow_action_id id, int oif)
{
struct flow_action_entry *entry;
struct net_device *dev;
/* nft_flow_rule_destroy() releases the reference on this device. */
dev = dev_get_by_index(ctx->net, oif);
if (!dev)
return -EOPNOTSUPP;
entry = &flow->rule->action.entries[ctx->num_actions++];
entry->id = id;
entry->dev = dev;
return 0;
}
EXPORT_SYMBOL_GPL(nft_fwd_dup_netdev_offload);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_DESCRIPTION("Netfilter packet duplication support");
| linux-master | net/netfilter/nf_dup_netdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2013 Astaro GmbH & Co KG
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_DESCRIPTION("Xtables: add/match connection tracking labels");
MODULE_ALIAS("ipt_connlabel");
MODULE_ALIAS("ip6t_connlabel");
static bool
connlabel_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_connlabel_mtinfo *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
struct nf_conn_labels *labels;
struct nf_conn *ct;
bool invert = info->options & XT_CONNLABEL_OP_INVERT;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return invert;
labels = nf_ct_labels_find(ct);
if (!labels)
return invert;
if (test_bit(info->bit, labels->bits))
return !invert;
if (info->options & XT_CONNLABEL_OP_SET) {
if (!test_and_set_bit(info->bit, labels->bits))
nf_conntrack_event_cache(IPCT_LABEL, ct);
return !invert;
}
return invert;
}
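/* Example (assumption: standard iptables userspace syntax; label names are
 * mapped to bit numbers by userspace):
 *	iptables -A INPUT -m connlabel --label "blocked" -j DROP
 *	iptables -A INPUT -m connlabel --label "seen" --set
 * The second form takes the XT_CONNLABEL_OP_SET path above and generates an
 * IPCT_LABEL event only on a 0 -> 1 bit transition.
 */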
static int connlabel_mt_check(const struct xt_mtchk_param *par)
{
const int options = XT_CONNLABEL_OP_INVERT |
XT_CONNLABEL_OP_SET;
struct xt_connlabel_mtinfo *info = par->matchinfo;
int ret;
if (info->options & ~options) {
pr_info_ratelimited("Unknown options in mask %x\n",
info->options);
return -EINVAL;
}
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0) {
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
ret = nf_connlabels_get(par->net, info->bit);
if (ret < 0)
nf_ct_netns_put(par->net, par->family);
return ret;
}
static void connlabel_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_connlabels_put(par->net);
nf_ct_netns_put(par->net, par->family);
}
static struct xt_match connlabels_mt_reg __read_mostly = {
.name = "connlabel",
.family = NFPROTO_UNSPEC,
.checkentry = connlabel_mt_check,
.match = connlabel_mt,
.matchsize = sizeof(struct xt_connlabel_mtinfo),
.destroy = connlabel_mt_destroy,
.me = THIS_MODULE,
};
static int __init connlabel_mt_init(void)
{
return xt_register_match(&connlabels_mt_reg);
}
static void __exit connlabel_mt_exit(void)
{
xt_unregister_match(&connlabels_mt_reg);
}
module_init(connlabel_mt_init);
module_exit(connlabel_mt_exit);
| linux-master | net/netfilter/xt_connlabel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* count the number of connections matching an arbitrary key.
*
* (C) 2017 Red Hat GmbH
* Author: Florian Westphal <[email protected]>
*
* split from xt_connlimit.c:
* (c) 2000 Gerd Knorr <[email protected]>
* Nov 2002: Martin Bene <[email protected]>:
* only ignore TIME_WAIT or gone connections
* (C) CC Computer Consultants GmbH, 2007
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define CONNCOUNT_SLOTS 256U
#define CONNCOUNT_GC_MAX_NODES 8
#define MAX_KEYLEN 5
/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
struct list_head node;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
int cpu;
u32 jiffies32;
};
struct nf_conncount_rb {
struct rb_node node;
struct nf_conncount_list list;
u32 key[MAX_KEYLEN];
struct rcu_head rcu_head;
};
static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
struct nf_conncount_data {
unsigned int keylen;
struct rb_root root[CONNCOUNT_SLOTS];
struct net *net;
struct work_struct gc_work;
unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
unsigned int gc_tree;
};
static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;
static inline bool already_closed(const struct nf_conn *conn)
{
if (nf_ct_protonum(conn) == IPPROTO_TCP)
return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
else
return false;
}
static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
return memcmp(a, b, klen * sizeof(u32));
}
static void conn_free(struct nf_conncount_list *list,
struct nf_conncount_tuple *conn)
{
lockdep_assert_held(&list->list_lock);
list->count--;
list_del(&conn->node);
kmem_cache_free(conncount_conn_cachep, conn);
}
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
struct nf_conncount_tuple *conn)
{
const struct nf_conntrack_tuple_hash *found;
unsigned long a, b;
int cpu = raw_smp_processor_id();
u32 age;
found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
if (found)
return found;
b = conn->jiffies32;
a = (u32)jiffies;
/* conn might have been added just before by another cpu and
* might still be unconfirmed. In this case, nf_conntrack_find()
* returns no result. Thus only evict if this cpu added the
* stale entry or if the entry is older than two jiffies.
*/
age = a - b;
if (conn->cpu == cpu || age >= 2) {
conn_free(list, conn);
return ERR_PTR(-ENOENT);
}
return ERR_PTR(-EAGAIN);
}
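/* Illustrative arithmetic note: the u32 subtraction above handles jiffies
 * wraparound, e.g. a = 0x00000001 and b = 0xffffffff give age = 2, so an
 * entry added by another cpu just before the wrap is still evicted once
 * age reaches 2 jiffies.
 */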
static int __nf_conncount_add(struct net *net,
struct nf_conncount_list *list,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone)
{
const struct nf_conntrack_tuple_hash *found;
struct nf_conncount_tuple *conn, *conn_n;
struct nf_conn *found_ct;
unsigned int collect = 0;
if (time_is_after_eq_jiffies((unsigned long)list->last_gc))
goto add_new_node;
/* check the saved connections */
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
if (collect > CONNCOUNT_GC_MAX_NODES)
break;
found = find_or_evict(net, list, conn);
if (IS_ERR(found)) {
/* Not found, but might be about to be confirmed */
if (PTR_ERR(found) == -EAGAIN) {
if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
nf_ct_zone_id(zone, zone->dir))
return 0; /* already exists */
} else {
collect++;
}
continue;
}
found_ct = nf_ct_tuplehash_to_ctrack(found);
if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
nf_ct_zone_equal(found_ct, zone, zone->dir)) {
/*
* We should not see tuples twice unless someone hooks
* this into a table without "-p tcp --syn".
*
* Attempt to avoid a re-add in this case.
*/
nf_ct_put(found_ct);
return 0;
} else if (already_closed(found_ct)) {
/*
* we do not care about connections which are
* closed already -> ditch it
*/
nf_ct_put(found_ct);
conn_free(list, conn);
collect++;
continue;
}
nf_ct_put(found_ct);
}
add_new_node:
if (WARN_ON_ONCE(list->count > INT_MAX))
return -EOVERFLOW;
conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
if (conn == NULL)
return -ENOMEM;
conn->tuple = *tuple;
conn->zone = *zone;
conn->cpu = raw_smp_processor_id();
conn->jiffies32 = (u32)jiffies;
list_add_tail(&conn->node, &list->head);
list->count++;
list->last_gc = (u32)jiffies;
return 0;
}
int nf_conncount_add(struct net *net,
struct nf_conncount_list *list,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone)
{
int ret;
/* check the saved connections */
spin_lock_bh(&list->list_lock);
ret = __nf_conncount_add(net, list, tuple, zone);
spin_unlock_bh(&list->list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);
void nf_conncount_list_init(struct nf_conncount_list *list)
{
spin_lock_init(&list->list_lock);
INIT_LIST_HEAD(&list->head);
list->count = 0;
list->last_gc = (u32)jiffies;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);
/* Return true if the list is empty. Must be called with BH disabled. */
bool nf_conncount_gc_list(struct net *net,
struct nf_conncount_list *list)
{
const struct nf_conntrack_tuple_hash *found;
struct nf_conncount_tuple *conn, *conn_n;
struct nf_conn *found_ct;
unsigned int collected = 0;
bool ret = false;
/* don't bother if we just did GC */
if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc)))
return false;
/* don't bother if other cpu is already doing GC */
if (!spin_trylock(&list->list_lock))
return false;
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
found = find_or_evict(net, list, conn);
if (IS_ERR(found)) {
if (PTR_ERR(found) == -ENOENT)
collected++;
continue;
}
found_ct = nf_ct_tuplehash_to_ctrack(found);
if (already_closed(found_ct)) {
/*
* we do not care about connections which are
* closed already -> ditch it
*/
nf_ct_put(found_ct);
conn_free(list, conn);
collected++;
continue;
}
nf_ct_put(found_ct);
if (collected > CONNCOUNT_GC_MAX_NODES)
break;
}
if (!list->count)
ret = true;
list->last_gc = (u32)jiffies;
spin_unlock(&list->list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
static void __tree_nodes_free(struct rcu_head *h)
{
struct nf_conncount_rb *rbconn;
rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
kmem_cache_free(conncount_rb_cachep, rbconn);
}
/* caller must hold tree nf_conncount_locks[] lock */
static void tree_nodes_free(struct rb_root *root,
struct nf_conncount_rb *gc_nodes[],
unsigned int gc_count)
{
struct nf_conncount_rb *rbconn;
while (gc_count) {
rbconn = gc_nodes[--gc_count];
spin_lock(&rbconn->list.list_lock);
if (!rbconn->list.count) {
rb_erase(&rbconn->node, root);
call_rcu(&rbconn->rcu_head, __tree_nodes_free);
}
spin_unlock(&rbconn->list.list_lock);
}
}
static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
set_bit(tree, data->pending_trees);
schedule_work(&data->gc_work);
}
static unsigned int
insert_tree(struct net *net,
struct nf_conncount_data *data,
struct rb_root *root,
unsigned int hash,
const u32 *key,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone)
{
struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
struct rb_node **rbnode, *parent;
struct nf_conncount_rb *rbconn;
struct nf_conncount_tuple *conn;
unsigned int count = 0, gc_count = 0;
u8 keylen = data->keylen;
bool do_gc = true;
spin_lock_bh(&nf_conncount_locks[hash]);
restart:
parent = NULL;
rbnode = &(root->rb_node);
while (*rbnode) {
int diff;
rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
parent = *rbnode;
diff = key_diff(key, rbconn->key, keylen);
if (diff < 0) {
rbnode = &((*rbnode)->rb_left);
} else if (diff > 0) {
rbnode = &((*rbnode)->rb_right);
} else {
int ret;
ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
if (ret)
count = 0; /* hotdrop */
else
count = rbconn->list.count;
tree_nodes_free(root, gc_nodes, gc_count);
goto out_unlock;
}
if (gc_count >= ARRAY_SIZE(gc_nodes))
continue;
if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
gc_nodes[gc_count++] = rbconn;
}
if (gc_count) {
tree_nodes_free(root, gc_nodes, gc_count);
schedule_gc_worker(data, hash);
gc_count = 0;
do_gc = false;
goto restart;
}
/* expected case: match, insert new node */
rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
if (rbconn == NULL)
goto out_unlock;
conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
if (conn == NULL) {
kmem_cache_free(conncount_rb_cachep, rbconn);
goto out_unlock;
}
conn->tuple = *tuple;
conn->zone = *zone;
memcpy(rbconn->key, key, sizeof(u32) * keylen);
nf_conncount_list_init(&rbconn->list);
list_add(&conn->node, &rbconn->list.head);
count = 1;
rbconn->list.count = count;
rb_link_node_rcu(&rbconn->node, parent, rbnode);
rb_insert_color(&rbconn->node, root);
out_unlock:
spin_unlock_bh(&nf_conncount_locks[hash]);
return count;
}
static unsigned int
count_tree(struct net *net,
struct nf_conncount_data *data,
const u32 *key,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone)
{
struct rb_root *root;
struct rb_node *parent;
struct nf_conncount_rb *rbconn;
unsigned int hash;
u8 keylen = data->keylen;
hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
root = &data->root[hash];
parent = rcu_dereference_raw(root->rb_node);
while (parent) {
int diff;
rbconn = rb_entry(parent, struct nf_conncount_rb, node);
diff = key_diff(key, rbconn->key, keylen);
if (diff < 0) {
parent = rcu_dereference_raw(parent->rb_left);
} else if (diff > 0) {
parent = rcu_dereference_raw(parent->rb_right);
} else {
int ret;
if (!tuple) {
nf_conncount_gc_list(net, &rbconn->list);
return rbconn->list.count;
}
spin_lock_bh(&rbconn->list.list_lock);
/* Node might be about to be free'd.
* We need to defer to insert_tree() in this case.
*/
if (rbconn->list.count == 0) {
spin_unlock_bh(&rbconn->list.list_lock);
break;
}
/* same source network -> be counted! */
ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
spin_unlock_bh(&rbconn->list.list_lock);
if (ret)
return 0; /* hotdrop */
else
return rbconn->list.count;
}
}
if (!tuple)
return 0;
return insert_tree(net, data, root, hash, key, tuple, zone);
}
static void tree_gc_worker(struct work_struct *work)
{
struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
struct rb_root *root;
struct rb_node *node;
unsigned int tree, next_tree, gc_count = 0;
tree = data->gc_tree % CONNCOUNT_SLOTS;
root = &data->root[tree];
local_bh_disable();
rcu_read_lock();
for (node = rb_first(root); node != NULL; node = rb_next(node)) {
rbconn = rb_entry(node, struct nf_conncount_rb, node);
if (nf_conncount_gc_list(data->net, &rbconn->list))
gc_count++;
}
rcu_read_unlock();
local_bh_enable();
cond_resched();
spin_lock_bh(&nf_conncount_locks[tree]);
if (gc_count < ARRAY_SIZE(gc_nodes))
goto next; /* do not bother */
gc_count = 0;
node = rb_first(root);
while (node != NULL) {
rbconn = rb_entry(node, struct nf_conncount_rb, node);
node = rb_next(node);
if (rbconn->list.count > 0)
continue;
gc_nodes[gc_count++] = rbconn;
if (gc_count >= ARRAY_SIZE(gc_nodes)) {
tree_nodes_free(root, gc_nodes, gc_count);
gc_count = 0;
}
}
tree_nodes_free(root, gc_nodes, gc_count);
next:
clear_bit(tree, data->pending_trees);
next_tree = (tree + 1) % CONNCOUNT_SLOTS;
next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
if (next_tree < CONNCOUNT_SLOTS) {
data->gc_tree = next_tree;
schedule_work(work);
}
spin_unlock_bh(&nf_conncount_locks[tree]);
}
/* Count and return number of conntrack entries in 'net' with particular 'key'.
* If 'tuple' is not null, insert it into the accounting data structure.
* Call with RCU read lock.
*/
unsigned int nf_conncount_count(struct net *net,
struct nf_conncount_data *data,
const u32 *key,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone)
{
return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
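/* Hedged usage sketch (not part of the original file; the function and key
 * layout are illustrative assumptions): a minimal consumer of the API
 * exported above, keying connections by their source /24. 'data' would come
 * from nf_conncount_init(net, family, sizeof(u32)). Guarded by #if 0 so the
 * file builds unchanged.
 */
#if 0
static bool example_src24_over_limit(struct net *net,
				     struct nf_conncount_data *data,
				     const struct sk_buff *skb,
				     unsigned int limit)
{
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	u32 key[1];

	if (!ct || nf_ct_l3num(ct) != NFPROTO_IPV4)
		return false;

	/* one u32 key: source address masked to /24 */
	key[0] = ntohl(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip)
		 & 0xffffff00;

	/* counts entries under this key and inserts the current tuple */
	return nf_conncount_count(net, data, key,
				  &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				  nf_ct_zone(ct)) > limit;
}
#endif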
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
unsigned int keylen)
{
struct nf_conncount_data *data;
int ret, i;
if (keylen % sizeof(u32) ||
keylen / sizeof(u32) > MAX_KEYLEN ||
keylen == 0)
return ERR_PTR(-EINVAL);
net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
ret = nf_ct_netns_get(net, family);
if (ret < 0) {
kfree(data);
return ERR_PTR(ret);
}
for (i = 0; i < ARRAY_SIZE(data->root); ++i)
data->root[i] = RB_ROOT;
data->keylen = keylen / sizeof(u32);
data->net = net;
INIT_WORK(&data->gc_work, tree_gc_worker);
return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);
void nf_conncount_cache_free(struct nf_conncount_list *list)
{
struct nf_conncount_tuple *conn, *conn_n;
list_for_each_entry_safe(conn, conn_n, &list->head, node)
kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);
static void destroy_tree(struct rb_root *r)
{
struct nf_conncount_rb *rbconn;
struct rb_node *node;
while ((node = rb_first(r)) != NULL) {
rbconn = rb_entry(node, struct nf_conncount_rb, node);
rb_erase(node, r);
nf_conncount_cache_free(&rbconn->list);
kmem_cache_free(conncount_rb_cachep, rbconn);
}
}
void nf_conncount_destroy(struct net *net, unsigned int family,
struct nf_conncount_data *data)
{
unsigned int i;
cancel_work_sync(&data->gc_work);
nf_ct_netns_put(net, family);
for (i = 0; i < ARRAY_SIZE(data->root); ++i)
destroy_tree(&data->root[i]);
kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);
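/* A minimal lifecycle sketch, assuming a hypothetical client keying on
 * the IPv4 source address (one u32 word); the names are illustrative and
 * the error handling mirrors a checkentry/destroy pair.
 */
static struct nf_conncount_data *example_data;

static int __maybe_unused example_setup(struct net *net)
{
	example_data = nf_conncount_init(net, NFPROTO_IPV4, sizeof(u32));
	return IS_ERR(example_data) ? PTR_ERR(example_data) : 0;
}

static void __maybe_unused example_teardown(struct net *net)
{
	nf_conncount_destroy(net, NFPROTO_IPV4, example_data);
}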
static int __init nf_conncount_modinit(void)
{
int i;
for (i = 0; i < CONNCOUNT_SLOTS; ++i)
spin_lock_init(&nf_conncount_locks[i]);
conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
sizeof(struct nf_conncount_tuple),
0, 0, NULL);
if (!conncount_conn_cachep)
return -ENOMEM;
conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
sizeof(struct nf_conncount_rb),
0, 0, NULL);
if (!conncount_rb_cachep) {
kmem_cache_destroy(conncount_conn_cachep);
return -ENOMEM;
}
return 0;
}
static void __exit nf_conncount_modexit(void)
{
kmem_cache_destroy(conncount_conn_cachep);
kmem_cache_destroy(conncount_rb_cachep);
}
module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/nf_conncount.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CT.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
{
/* Previously seen (loopback)? Ignore. */
if (skb->_nfct != 0)
return XT_CONTINUE;
if (ct) {
refcount_inc(&ct->ct_general.use);
nf_ct_set(skb, ct, IP_CT_NEW);
} else {
nf_ct_set(skb, ct, IP_CT_UNTRACKED);
}
return XT_CONTINUE;
}
static unsigned int xt_ct_target_v0(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct xt_ct_target_info *info = par->targinfo;
struct nf_conn *ct = info->ct;
return xt_ct_target(skb, ct);
}
static unsigned int xt_ct_target_v1(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct xt_ct_target_info_v1 *info = par->targinfo;
struct nf_conn *ct = info->ct;
return xt_ct_target(skb, ct);
}
static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
{
if (par->family == NFPROTO_IPV4) {
const struct ipt_entry *e = par->entryinfo;
if (e->ip.invflags & IPT_INV_PROTO)
return 0;
return e->ip.proto;
} else if (par->family == NFPROTO_IPV6) {
const struct ip6t_entry *e = par->entryinfo;
if (e->ipv6.invflags & IP6T_INV_PROTO)
return 0;
return e->ipv6.proto;
} else
return 0;
}
static int
xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
const struct xt_tgchk_param *par)
{
struct nf_conntrack_helper *helper;
struct nf_conn_help *help;
u8 proto;
proto = xt_ct_find_proto(par);
if (!proto) {
pr_info_ratelimited("You must specify a L4 protocol and not use inversions on it\n");
return -ENOENT;
}
helper = nf_conntrack_helper_try_module_get(helper_name, par->family,
proto);
if (helper == NULL) {
pr_info_ratelimited("No such helper \"%s\"\n", helper_name);
return -ENOENT;
}
help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
if (help == NULL) {
nf_conntrack_helper_put(helper);
return -ENOMEM;
}
rcu_assign_pointer(help->helper, helper);
return 0;
}
static int
xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
const char *timeout_name)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
const struct nf_conntrack_l4proto *l4proto;
u8 proto;
proto = xt_ct_find_proto(par);
if (!proto) {
pr_info_ratelimited("You must specify a L4 protocol and not "
"use inversions on it");
return -EINVAL;
}
l4proto = nf_ct_l4proto_find(proto);
return nf_ct_set_timeout(par->net, ct, par->family, l4proto->l4proto,
timeout_name);
#else
return -EOPNOTSUPP;
#endif
}
static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
{
switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
XT_CT_ZONE_DIR_REPL)) {
case XT_CT_ZONE_DIR_ORIG:
return NF_CT_ZONE_DIR_ORIG;
case XT_CT_ZONE_DIR_REPL:
return NF_CT_ZONE_DIR_REPL;
default:
return NF_CT_DEFAULT_ZONE_DIR;
}
}
static void xt_ct_put_helper(struct nf_conn_help *help)
{
struct nf_conntrack_helper *helper;
if (!help)
return;
/* not yet exposed to other cpus, or ruleset
* already detached (post-replacement).
*/
helper = rcu_dereference_raw(help->helper);
if (helper)
nf_conntrack_helper_put(helper);
}
static int xt_ct_tg_check(const struct xt_tgchk_param *par,
struct xt_ct_target_info_v1 *info)
{
struct nf_conntrack_zone zone;
struct nf_conn_help *help;
struct nf_conn *ct;
int ret = -EOPNOTSUPP;
if (info->flags & XT_CT_NOTRACK) {
ct = NULL;
goto out;
}
#ifndef CONFIG_NF_CONNTRACK_ZONES
if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
XT_CT_ZONE_DIR_REPL |
XT_CT_ZONE_MARK))
goto err1;
#endif
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
goto err1;
memset(&zone, 0, sizeof(zone));
zone.id = info->zone;
zone.dir = xt_ct_flags_to_dir(info);
if (info->flags & XT_CT_ZONE_MARK)
zone.flags |= NF_CT_FLAG_MARK;
ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
if (!ct) {
ret = -ENOMEM;
goto err2;
}
if ((info->ct_events || info->exp_events) &&
!nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
GFP_KERNEL)) {
ret = -EINVAL;
goto err3;
}
if (info->helper[0]) {
if (strnlen(info->helper, sizeof(info->helper)) == sizeof(info->helper)) {
ret = -ENAMETOOLONG;
goto err3;
}
ret = xt_ct_set_helper(ct, info->helper, par);
if (ret < 0)
goto err3;
}
if (info->timeout[0]) {
if (strnlen(info->timeout, sizeof(info->timeout)) == sizeof(info->timeout)) {
ret = -ENAMETOOLONG;
goto err4;
}
ret = xt_ct_set_timeout(ct, par, info->timeout);
if (ret < 0)
goto err4;
}
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
out:
info->ct = ct;
return 0;
err4:
help = nfct_help(ct);
xt_ct_put_helper(help);
err3:
nf_ct_tmpl_free(ct);
err2:
nf_ct_netns_put(par->net, par->family);
err1:
return ret;
}
static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct xt_ct_target_info_v1 info_v1 = {
.flags = info->flags,
.zone = info->zone,
.ct_events = info->ct_events,
.exp_events = info->exp_events,
};
int ret;
if (info->flags & ~XT_CT_NOTRACK)
return -EINVAL;
memcpy(info_v1.helper, info->helper, sizeof(info->helper));
ret = xt_ct_tg_check(par, &info_v1);
if (ret < 0)
return ret;
info->ct = info_v1.ct;
return ret;
}
static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info_v1 *info = par->targinfo;
if (info->flags & ~XT_CT_NOTRACK)
return -EINVAL;
return xt_ct_tg_check(par, par->targinfo);
}
static int xt_ct_tg_check_v2(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info_v1 *info = par->targinfo;
if (info->flags & ~XT_CT_MASK)
return -EINVAL;
return xt_ct_tg_check(par, par->targinfo);
}
static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
struct xt_ct_target_info_v1 *info)
{
struct nf_conn *ct = info->ct;
struct nf_conn_help *help;
if (ct) {
help = nfct_help(ct);
xt_ct_put_helper(help);
nf_ct_netns_put(par->net, par->family);
nf_ct_destroy_timeout(ct);
nf_ct_put(info->ct);
}
}
static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct xt_ct_target_info_v1 info_v1 = {
.flags = info->flags,
.zone = info->zone,
.ct_events = info->ct_events,
.exp_events = info->exp_events,
.ct = info->ct,
};
memcpy(info_v1.helper, info->helper, sizeof(info->helper));
xt_ct_tg_destroy(par, &info_v1);
}
static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
{
xt_ct_tg_destroy(par, par->targinfo);
}
static struct xt_target xt_ct_tg_reg[] __read_mostly = {
{
.name = "CT",
.family = NFPROTO_UNSPEC,
.targetsize = sizeof(struct xt_ct_target_info),
.usersize = offsetof(struct xt_ct_target_info, ct),
.checkentry = xt_ct_tg_check_v0,
.destroy = xt_ct_tg_destroy_v0,
.target = xt_ct_target_v0,
.table = "raw",
.me = THIS_MODULE,
},
{
.name = "CT",
.family = NFPROTO_UNSPEC,
.revision = 1,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.usersize = offsetof(struct xt_ct_target_info_v1, ct),
.checkentry = xt_ct_tg_check_v1,
.destroy = xt_ct_tg_destroy_v1,
.target = xt_ct_target_v1,
.table = "raw",
.me = THIS_MODULE,
},
{
.name = "CT",
.family = NFPROTO_UNSPEC,
.revision = 2,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.usersize = offsetof(struct xt_ct_target_info_v1, ct),
.checkentry = xt_ct_tg_check_v2,
.destroy = xt_ct_tg_destroy_v1,
.target = xt_ct_target_v1,
.table = "raw",
.me = THIS_MODULE,
},
};
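/* Usage sketch: the three revisions above back the iptables CT target in
 * the raw table; rule syntax per iptables-extensions(8), e.g.:
 *
 *   iptables -t raw -A PREROUTING -p tcp --dport 21 -j CT --helper ftp
 *   iptables -t raw -A PREROUTING -s 192.0.2.0/24 -j CT --notrack
 *
 * Revision 1 adds the timeout field; revision 2 keeps the same layout
 * and only widens the accepted flag mask (XT_CT_MASK) checked in
 * xt_ct_tg_check_v2().
 */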
static unsigned int
notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
/* Previously seen (loopback)? Ignore. */
if (skb->_nfct != 0)
return XT_CONTINUE;
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
return XT_CONTINUE;
}
static struct xt_target notrack_tg_reg __read_mostly = {
.name = "NOTRACK",
.revision = 0,
.family = NFPROTO_UNSPEC,
.target = notrack_tg,
.table = "raw",
.me = THIS_MODULE,
};
static int __init xt_ct_tg_init(void)
{
int ret;
ret = xt_register_target(&notrack_tg_reg);
if (ret < 0)
return ret;
ret = xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
if (ret < 0) {
xt_unregister_target(&notrack_tg_reg);
return ret;
}
return 0;
}
static void __exit xt_ct_tg_exit(void)
{
xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
xt_unregister_target(&notrack_tg_reg);
}
module_init(xt_ct_tg_init);
module_exit(xt_ct_tg_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: connection tracking target");
MODULE_ALIAS("ipt_CT");
MODULE_ALIAS("ip6t_CT");
MODULE_ALIAS("ipt_NOTRACK");
MODULE_ALIAS("ip6t_NOTRACK");
| linux-master | net/netfilter/xt_CT.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>
static const unsigned int nf_ct_generic_timeout = 600*HZ;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct nf_generic_net *gn = nf_generic_pernet(net);
unsigned int *timeout = data;
if (!timeout)
timeout = &gn->timeout;
if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
else {
/* Set default generic timeout. */
*timeout = gn->timeout;
}
return 0;
}
static int
generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
[CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_generic_init_net(struct net *net)
{
struct nf_generic_net *gn = nf_generic_pernet(net);
gn->timeout = nf_ct_generic_timeout;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
{
.l4proto = 255,
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = generic_timeout_nlattr_to_obj,
.obj_to_nlattr = generic_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_GENERIC_MAX,
.obj_size = sizeof(unsigned int),
.nla_policy = generic_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
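/* Worked example: the cttimeout netlink attribute carries seconds while
 * the kernel stores jiffies, so with the 600*HZ default above a dump
 * emits htonl(600), and a userspace request of 300 seconds is stored as
 * 300*HZ jiffies by generic_timeout_nlattr_to_obj().
 */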
| linux-master | net/netfilter/nf_conntrack_proto_generic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2014 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/jhash.h>
#include <linux/netlink.h>
#include <linux/workqueue.h>
#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
/* We target a hash table size of 4, element hint is 75% of final size */
#define NFT_RHASH_ELEMENT_HINT 3
struct nft_rhash {
struct rhashtable ht;
struct delayed_work gc_work;
};
struct nft_rhash_elem {
struct rhash_head node;
struct nft_set_ext ext;
};
struct nft_rhash_cmp_arg {
const struct nft_set *set;
const u32 *key;
u8 genmask;
};
static inline u32 nft_rhash_key(const void *data, u32 len, u32 seed)
{
const struct nft_rhash_cmp_arg *arg = data;
return jhash(arg->key, len, seed);
}
static inline u32 nft_rhash_obj(const void *data, u32 len, u32 seed)
{
const struct nft_rhash_elem *he = data;
return jhash(nft_set_ext_key(&he->ext), len, seed);
}
static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct nft_rhash_cmp_arg *x = arg->key;
const struct nft_rhash_elem *he = ptr;
if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
return 1;
if (nft_set_elem_is_dead(&he->ext))
return 1;
if (nft_set_elem_expired(&he->ext))
return 1;
if (!nft_set_elem_active(&he->ext, x->genmask))
return 1;
return 0;
}
static const struct rhashtable_params nft_rhash_params = {
.head_offset = offsetof(struct nft_rhash_elem, node),
.hashfn = nft_rhash_key,
.obj_hashfn = nft_rhash_obj,
.obj_cmpfn = nft_rhash_cmp,
.automatic_shrinking = true,
};
INDIRECT_CALLABLE_SCOPE
bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
struct nft_rhash *priv = nft_set_priv(set);
const struct nft_rhash_elem *he;
struct nft_rhash_cmp_arg arg = {
.genmask = nft_genmask_cur(net),
.set = set,
.key = key,
};
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he != NULL)
*ext = &he->ext;
return !!he;
}
static void *nft_rhash_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_elem *he;
struct nft_rhash_cmp_arg arg = {
.genmask = nft_genmask_cur(net),
.set = set,
.key = elem->key.val.data,
};
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he != NULL)
return he;
return ERR_PTR(-ENOENT);
}
static bool nft_rhash_update(struct nft_set *set, const u32 *key,
void *(*new)(struct nft_set *,
const struct nft_expr *,
struct nft_regs *regs),
const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_set_ext **ext)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_elem *he, *prev;
struct nft_rhash_cmp_arg arg = {
.genmask = NFT_GENMASK_ANY,
.set = set,
.key = key,
};
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he != NULL)
goto out;
he = new(set, expr, regs);
if (he == NULL)
goto err1;
prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
nft_rhash_params);
if (IS_ERR(prev))
goto err2;
/* Another cpu may race to insert the element with the same key */
if (prev) {
nft_set_elem_destroy(set, he, true);
atomic_dec(&set->nelems);
he = prev;
}
out:
*ext = &he->ext;
return true;
err2:
nft_set_elem_destroy(set, he, true);
atomic_dec(&set->nelems);
err1:
return false;
}
static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **ext)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_elem *he = elem->priv;
struct nft_rhash_cmp_arg arg = {
.genmask = nft_genmask_next(net),
.set = set,
.key = elem->key.val.data,
};
struct nft_rhash_elem *prev;
prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
nft_rhash_params);
if (IS_ERR(prev))
return PTR_ERR(prev);
if (prev) {
*ext = &prev->ext;
return -EEXIST;
}
return 0;
}
static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_rhash_elem *he = elem->priv;
nft_set_elem_change_active(net, set, &he->ext);
}
static bool nft_rhash_flush(const struct net *net,
const struct nft_set *set, void *priv)
{
struct nft_rhash_elem *he = priv;
nft_set_elem_change_active(net, set, &he->ext);
return true;
}
static void *nft_rhash_deactivate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_elem *he;
struct nft_rhash_cmp_arg arg = {
.genmask = nft_genmask_next(net),
.set = set,
.key = elem->key.val.data,
};
rcu_read_lock();
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he)
nft_set_elem_change_active(net, set, &he->ext);
rcu_read_unlock();
return he;
}
static void nft_rhash_remove(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_elem *he = elem->priv;
rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
}
static bool nft_rhash_delete(const struct nft_set *set,
const u32 *key)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_cmp_arg arg = {
.genmask = NFT_GENMASK_ANY,
.set = set,
.key = key,
};
struct nft_rhash_elem *he;
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he == NULL)
return false;
nft_set_elem_dead(&he->ext);
return true;
}
static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_iter *iter)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_elem *he;
struct rhashtable_iter hti;
struct nft_set_elem elem;
rhashtable_walk_enter(&priv->ht, &hti);
rhashtable_walk_start(&hti);
while ((he = rhashtable_walk_next(&hti))) {
if (IS_ERR(he)) {
if (PTR_ERR(he) != -EAGAIN) {
iter->err = PTR_ERR(he);
break;
}
continue;
}
if (iter->count < iter->skip)
goto cont;
if (!nft_set_elem_active(&he->ext, iter->genmask))
goto cont;
elem.priv = he;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0)
break;
cont:
iter->count++;
}
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
}
static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
struct nft_set_ext *ext)
{
struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
struct nft_expr *expr;
u32 size;
nft_setelem_expr_foreach(expr, elem_expr, size) {
if (expr->ops->gc &&
expr->ops->gc(read_pnet(&set->net), expr))
return true;
}
return false;
}
static void nft_rhash_gc(struct work_struct *work)
{
struct nftables_pernet *nft_net;
struct nft_set *set;
struct nft_rhash_elem *he;
struct nft_rhash *priv;
struct rhashtable_iter hti;
struct nft_trans_gc *gc;
struct net *net;
u32 gc_seq;
priv = container_of(work, struct nft_rhash, gc_work.work);
set = nft_set_container_of(priv);
net = read_pnet(&set->net);
nft_net = nft_pernet(net);
gc_seq = READ_ONCE(nft_net->gc_seq);
if (nft_set_gc_is_pending(set))
goto done;
gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
if (!gc)
goto done;
rhashtable_walk_enter(&priv->ht, &hti);
rhashtable_walk_start(&hti);
while ((he = rhashtable_walk_next(&hti))) {
if (IS_ERR(he)) {
nft_trans_gc_destroy(gc);
gc = NULL;
goto try_later;
}
/* Ruleset has been updated, try later. */
if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
nft_trans_gc_destroy(gc);
gc = NULL;
goto try_later;
}
if (nft_set_elem_is_dead(&he->ext))
goto dead_elem;
if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPRESSIONS) &&
nft_rhash_expr_needs_gc_run(set, &he->ext))
goto needs_gc_run;
if (!nft_set_elem_expired(&he->ext))
continue;
needs_gc_run:
nft_set_elem_dead(&he->ext);
dead_elem:
gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
if (!gc)
goto try_later;
nft_trans_gc_elem_add(gc, he);
}
gc = nft_trans_gc_catchall_async(gc, gc_seq);
try_later:
/* catchall list iteration requires rcu read side lock. */
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
if (gc)
nft_trans_gc_queue_async_done(gc);
done:
queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
nft_set_gc_interval(set));
}
static u64 nft_rhash_privsize(const struct nlattr * const nla[],
const struct nft_set_desc *desc)
{
return sizeof(struct nft_rhash);
}
static void nft_rhash_gc_init(const struct nft_set *set)
{
struct nft_rhash *priv = nft_set_priv(set);
queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
nft_set_gc_interval(set));
}
static int nft_rhash_init(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const tb[])
{
struct nft_rhash *priv = nft_set_priv(set);
struct rhashtable_params params = nft_rhash_params;
int err;
params.nelem_hint = desc->size ?: NFT_RHASH_ELEMENT_HINT;
params.key_len = set->klen;
err = rhashtable_init(&priv->ht, &params);
if (err < 0)
return err;
INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);
if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL))
nft_rhash_gc_init(set);
return 0;
}
struct nft_rhash_ctx {
const struct nft_ctx ctx;
const struct nft_set *set;
};
static void nft_rhash_elem_destroy(void *ptr, void *arg)
{
struct nft_rhash_ctx *rhash_ctx = arg;
nf_tables_set_elem_destroy(&rhash_ctx->ctx, rhash_ctx->set, ptr);
}
static void nft_rhash_destroy(const struct nft_ctx *ctx,
const struct nft_set *set)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_ctx rhash_ctx = {
.ctx = *ctx,
.set = set,
};
cancel_delayed_work_sync(&priv->gc_work);
rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
(void *)&rhash_ctx);
}
/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
#define NFT_MAX_BUCKETS (1U << 31)
static u32 nft_hash_buckets(u32 size)
{
u64 val = div_u64((u64)size * 4, 3);
if (val >= NFT_MAX_BUCKETS)
return NFT_MAX_BUCKETS;
return roundup_pow_of_two(val);
}
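/* Worked example: for a set declared with size 100,
 *   val = 100 * 4 / 3 = 133 (truncating)
 *   roundup_pow_of_two(133) = 256
 * so the fixed hash gets 256 buckets, keeping the pre-rounding load
 * factor at no more than 75%.
 */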
static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
est->size = ~0;
est->lookup = NFT_SET_CLASS_O_1;
est->space = NFT_SET_CLASS_O_N;
return true;
}
struct nft_hash {
u32 seed;
u32 buckets;
struct hlist_head table[];
};
struct nft_hash_elem {
struct hlist_node node;
struct nft_set_ext ext;
};
INDIRECT_CALLABLE_SCOPE
bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
const struct nft_hash_elem *he;
u32 hash;
hash = jhash(key, set->klen, priv->seed);
hash = reciprocal_scale(hash, priv->buckets);
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
if (!memcmp(nft_set_ext_key(&he->ext), key, set->klen) &&
nft_set_elem_active(&he->ext, genmask)) {
*ext = &he->ext;
return true;
}
}
return false;
}
static void *nft_hash_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
struct nft_hash_elem *he;
u32 hash;
hash = jhash(elem->key.val.data, set->klen, priv->seed);
hash = reciprocal_scale(hash, priv->buckets);
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
if (!memcmp(nft_set_ext_key(&he->ext), elem->key.val.data, set->klen) &&
nft_set_elem_active(&he->ext, genmask))
return he;
}
return ERR_PTR(-ENOENT);
}
INDIRECT_CALLABLE_SCOPE
bool nft_hash_lookup_fast(const struct net *net,
const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
const struct nft_hash_elem *he;
u32 hash, k1, k2;
k1 = *key;
hash = jhash_1word(k1, priv->seed);
hash = reciprocal_scale(hash, priv->buckets);
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
k2 = *(u32 *)nft_set_ext_key(&he->ext)->data;
if (k1 == k2 &&
nft_set_elem_active(&he->ext, genmask)) {
*ext = &he->ext;
return true;
}
}
return false;
}
static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv,
const struct nft_set_ext *ext)
{
const struct nft_data *key = nft_set_ext_key(ext);
u32 hash, k1;
if (set->klen == 4) {
k1 = *(u32 *)key;
hash = jhash_1word(k1, priv->seed);
} else {
hash = jhash(key, set->klen, priv->seed);
}
hash = reciprocal_scale(hash, priv->buckets);
return hash;
}
static int nft_hash_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **ext)
{
struct nft_hash_elem *this = elem->priv, *he;
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
u32 hash;
hash = nft_jhash(set, priv, &this->ext);
hlist_for_each_entry(he, &priv->table[hash], node) {
if (!memcmp(nft_set_ext_key(&this->ext),
nft_set_ext_key(&he->ext), set->klen) &&
nft_set_elem_active(&he->ext, genmask)) {
*ext = &he->ext;
return -EEXIST;
}
}
hlist_add_head_rcu(&this->node, &priv->table[hash]);
return 0;
}
static void nft_hash_activate(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_hash_elem *he = elem->priv;
nft_set_elem_change_active(net, set, &he->ext);
}
static bool nft_hash_flush(const struct net *net,
const struct nft_set *set, void *priv)
{
struct nft_hash_elem *he = priv;
nft_set_elem_change_active(net, set, &he->ext);
return true;
}
static void *nft_hash_deactivate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_hash *priv = nft_set_priv(set);
struct nft_hash_elem *this = elem->priv, *he;
u8 genmask = nft_genmask_next(net);
u32 hash;
hash = nft_jhash(set, priv, &this->ext);
hlist_for_each_entry(he, &priv->table[hash], node) {
if (!memcmp(nft_set_ext_key(&he->ext), &elem->key.val,
set->klen) &&
nft_set_elem_active(&he->ext, genmask)) {
nft_set_elem_change_active(net, set, &he->ext);
return he;
}
}
return NULL;
}
static void nft_hash_remove(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_hash_elem *he = elem->priv;
hlist_del_rcu(&he->node);
}
static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_iter *iter)
{
struct nft_hash *priv = nft_set_priv(set);
struct nft_hash_elem *he;
struct nft_set_elem elem;
int i;
for (i = 0; i < priv->buckets; i++) {
hlist_for_each_entry_rcu(he, &priv->table[i], node) {
if (iter->count < iter->skip)
goto cont;
if (!nft_set_elem_active(&he->ext, iter->genmask))
goto cont;
elem.priv = he;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0)
return;
cont:
iter->count++;
}
}
}
static u64 nft_hash_privsize(const struct nlattr * const nla[],
const struct nft_set_desc *desc)
{
return sizeof(struct nft_hash) +
(u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head);
}
static int nft_hash_init(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const tb[])
{
struct nft_hash *priv = nft_set_priv(set);
priv->buckets = nft_hash_buckets(desc->size);
get_random_bytes(&priv->seed, sizeof(priv->seed));
return 0;
}
static void nft_hash_destroy(const struct nft_ctx *ctx,
const struct nft_set *set)
{
struct nft_hash *priv = nft_set_priv(set);
struct nft_hash_elem *he;
struct hlist_node *next;
int i;
for (i = 0; i < priv->buckets; i++) {
hlist_for_each_entry_safe(he, next, &priv->table[i], node) {
hlist_del_rcu(&he->node);
nf_tables_set_elem_destroy(ctx, set, he);
}
}
}
static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
if (!desc->size)
return false;
if (desc->klen == 4)
return false;
est->size = sizeof(struct nft_hash) +
(u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
(u64)desc->size * sizeof(struct nft_hash_elem);
est->lookup = NFT_SET_CLASS_O_1;
est->space = NFT_SET_CLASS_O_N;
return true;
}
static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
if (!desc->size)
return false;
if (desc->klen != 4)
return false;
est->size = sizeof(struct nft_hash) +
(u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
(u64)desc->size * sizeof(struct nft_hash_elem);
est->lookup = NFT_SET_CLASS_O_1;
est->space = NFT_SET_CLASS_O_N;
return true;
}
const struct nft_set_type nft_set_rhash_type = {
.features = NFT_SET_MAP | NFT_SET_OBJECT |
NFT_SET_TIMEOUT | NFT_SET_EVAL,
.ops = {
.privsize = nft_rhash_privsize,
.elemsize = offsetof(struct nft_rhash_elem, ext),
.estimate = nft_rhash_estimate,
.init = nft_rhash_init,
.gc_init = nft_rhash_gc_init,
.destroy = nft_rhash_destroy,
.insert = nft_rhash_insert,
.activate = nft_rhash_activate,
.deactivate = nft_rhash_deactivate,
.flush = nft_rhash_flush,
.remove = nft_rhash_remove,
.lookup = nft_rhash_lookup,
.update = nft_rhash_update,
.delete = nft_rhash_delete,
.walk = nft_rhash_walk,
.get = nft_rhash_get,
},
};
const struct nft_set_type nft_set_hash_type = {
.features = NFT_SET_MAP | NFT_SET_OBJECT,
.ops = {
.privsize = nft_hash_privsize,
.elemsize = offsetof(struct nft_hash_elem, ext),
.estimate = nft_hash_estimate,
.init = nft_hash_init,
.destroy = nft_hash_destroy,
.insert = nft_hash_insert,
.activate = nft_hash_activate,
.deactivate = nft_hash_deactivate,
.flush = nft_hash_flush,
.remove = nft_hash_remove,
.lookup = nft_hash_lookup,
.walk = nft_hash_walk,
.get = nft_hash_get,
},
};
const struct nft_set_type nft_set_hash_fast_type = {
.features = NFT_SET_MAP | NFT_SET_OBJECT,
.ops = {
.privsize = nft_hash_privsize,
.elemsize = offsetof(struct nft_hash_elem, ext),
.estimate = nft_hash_fast_estimate,
.init = nft_hash_init,
.destroy = nft_hash_destroy,
.insert = nft_hash_insert,
.activate = nft_hash_activate,
.deactivate = nft_hash_deactivate,
.flush = nft_hash_flush,
.remove = nft_hash_remove,
.lookup = nft_hash_lookup_fast,
.walk = nft_hash_walk,
.get = nft_hash_get,
},
};
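/* Usage sketch: the nf_tables core picks among these backends from the
 * estimates above, so the choice is indirect, but typically:
 *
 *   nft add set ip t s '{ type ipv4_addr; size 1024; }'
 *       -> fixed-size hash; the 4-byte key selects nft_hash_lookup_fast()
 *   nft add set ip t d '{ type ipv4_addr; flags timeout; }'
 *       -> resizable rhash, the only type here advertising NFT_SET_TIMEOUT
 */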
| linux-master | net/netfilter/nft_set_hash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xt_iprange - Netfilter module to match IP address ranges
*
* (C) 2003 Jozsef Kadlecsik <[email protected]>
* (C) CC Computer Consultants GmbH, 2008
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_iprange.h>
static bool
iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_iprange_mtinfo *info = par->matchinfo;
const struct iphdr *iph = ip_hdr(skb);
bool m;
if (info->flags & IPRANGE_SRC) {
m = ntohl(iph->saddr) < ntohl(info->src_min.ip);
m |= ntohl(iph->saddr) > ntohl(info->src_max.ip);
m ^= !!(info->flags & IPRANGE_SRC_INV);
if (m) {
pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
&iph->saddr,
(info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
&info->src_min.ip,
&info->src_max.ip);
return false;
}
}
if (info->flags & IPRANGE_DST) {
m = ntohl(iph->daddr) < ntohl(info->dst_min.ip);
m |= ntohl(iph->daddr) > ntohl(info->dst_max.ip);
m ^= !!(info->flags & IPRANGE_DST_INV);
if (m) {
pr_debug("dst IP %pI4 NOT in range %s%pI4-%pI4\n",
&iph->daddr,
(info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
&info->dst_min.ip,
&info->dst_max.ip);
return false;
}
}
return true;
}
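/* Worked example of the invert logic above: with src_min = 10.0.0.1,
 * src_max = 10.0.0.9 and a packet from 10.0.0.5 (host order):
 *   m = (5 < 1) | (5 > 9) = false, i.e. the address is inside the range
 * Without IPRANGE_SRC_INV the match continues; with it, m ^= 1 makes m
 * true and the in-range packet fails the match, so m always means
 * "this address test failed".
 */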
static inline int
iprange_ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
{
unsigned int i;
for (i = 0; i < 4; ++i) {
if (a->s6_addr32[i] != b->s6_addr32[i])
return ntohl(a->s6_addr32[i]) < ntohl(b->s6_addr32[i]);
}
return 0;
}
static bool
iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_iprange_mtinfo *info = par->matchinfo;
const struct ipv6hdr *iph = ipv6_hdr(skb);
bool m;
if (info->flags & IPRANGE_SRC) {
m = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
m ^= !!(info->flags & IPRANGE_SRC_INV);
if (m) {
pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n",
&iph->saddr,
(info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
&info->src_min.in6,
&info->src_max.in6);
return false;
}
}
if (info->flags & IPRANGE_DST) {
m = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
m ^= !!(info->flags & IPRANGE_DST_INV);
if (m) {
pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n",
&iph->daddr,
(info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
&info->dst_min.in6,
&info->dst_max.in6);
return false;
}
}
return true;
}
static struct xt_match iprange_mt_reg[] __read_mostly = {
{
.name = "iprange",
.revision = 1,
.family = NFPROTO_IPV4,
.match = iprange_mt4,
.matchsize = sizeof(struct xt_iprange_mtinfo),
.me = THIS_MODULE,
},
{
.name = "iprange",
.revision = 1,
.family = NFPROTO_IPV6,
.match = iprange_mt6,
.matchsize = sizeof(struct xt_iprange_mtinfo),
.me = THIS_MODULE,
},
};
static int __init iprange_mt_init(void)
{
return xt_register_matches(iprange_mt_reg, ARRAY_SIZE(iprange_mt_reg));
}
static void __exit iprange_mt_exit(void)
{
xt_unregister_matches(iprange_mt_reg, ARRAY_SIZE(iprange_mt_reg));
}
module_init(iprange_mt_init);
module_exit(iprange_mt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>");
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching");
MODULE_ALIAS("ipt_iprange");
MODULE_ALIAS("ip6t_iprange");
| linux-master | net/netfilter/xt_iprange.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Support ct functions for openvswitch and used by OVS and TC conntrack. */
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>
#include <net/ip.h>
#include <linux/netfilter_ipv6.h>
/* 'skb' should already be pulled to nh_ofs. */
int nf_ct_helper(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, u16 proto)
{
const struct nf_conntrack_helper *helper;
const struct nf_conn_help *help;
unsigned int protoff;
int err;
if (ctinfo == IP_CT_RELATED_REPLY)
return NF_ACCEPT;
help = nfct_help(ct);
if (!help)
return NF_ACCEPT;
helper = rcu_dereference(help->helper);
if (!helper)
return NF_ACCEPT;
if (helper->tuple.src.l3num != NFPROTO_UNSPEC &&
helper->tuple.src.l3num != proto)
return NF_ACCEPT;
switch (proto) {
case NFPROTO_IPV4:
protoff = ip_hdrlen(skb);
proto = ip_hdr(skb)->protocol;
break;
case NFPROTO_IPV6: {
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
__be16 frag_off;
int ofs;
ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
&frag_off);
if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
pr_debug("proto header not found\n");
return NF_ACCEPT;
}
protoff = ofs;
proto = nexthdr;
break;
}
default:
WARN_ONCE(1, "helper invoked on non-IP family!");
return NF_DROP;
}
if (helper->tuple.dst.protonum != proto)
return NF_ACCEPT;
err = helper->help(skb, protoff, ct, ctinfo);
if (err != NF_ACCEPT)
return err;
/* Adjust seqs after helper. This is needed due to some helpers (e.g.,
* FTP with NAT) adjusting the TCP payload size when mangling IP
* addresses and/or port numbers in the text-based control connection.
*/
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
!nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
return NF_DROP;
return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_ct_helper);
int nf_ct_add_helper(struct nf_conn *ct, const char *name, u8 family,
u8 proto, bool nat, struct nf_conntrack_helper **hp)
{
struct nf_conntrack_helper *helper;
struct nf_conn_help *help;
int ret = 0;
helper = nf_conntrack_helper_try_module_get(name, family, proto);
if (!helper)
return -EINVAL;
help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
if (!help) {
nf_conntrack_helper_put(helper);
return -ENOMEM;
}
#if IS_ENABLED(CONFIG_NF_NAT)
if (nat) {
ret = nf_nat_helper_try_module_get(name, family, proto);
if (ret) {
nf_conntrack_helper_put(helper);
return ret;
}
}
#endif
rcu_assign_pointer(help->helper, helper);
*hp = helper;
return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_add_helper);
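/* A minimal sketch, assuming a hypothetical OVS/TC-style caller that
 * wants the FTP helper (plus its NAT module) on a template conntrack:
 */
static int __maybe_unused example_attach_ftp_helper(struct nf_conn *ct)
{
	struct nf_conntrack_helper *helper;

	return nf_ct_add_helper(ct, "ftp", NFPROTO_IPV4, IPPROTO_TCP,
				true, &helper);
}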
/* Trim the skb to the length specified by the IP/IPv6 header,
* removing any trailing lower-layer padding. This prepares the skb
* for higher-layer processing that assumes skb->len excludes padding
* (such as nf_ip_checksum). The caller needs to pull the skb to the
* network header, and ensure ip_hdr/ipv6_hdr points to valid data.
*/
int nf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
unsigned int len;
switch (family) {
case NFPROTO_IPV4:
len = skb_ip_totlen(skb);
break;
case NFPROTO_IPV6:
len = ntohs(ipv6_hdr(skb)->payload_len);
if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP) {
int err = nf_ip6_check_hbh_len(skb, &len);
if (err)
return err;
}
len += sizeof(struct ipv6hdr);
break;
default:
len = skb->len;
}
return pskb_trim_rcsum(skb, len);
}
EXPORT_SYMBOL_GPL(nf_ct_skb_network_trim);
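/* Worked example: a minimum-size Ethernet frame carrying a 40-byte IPv4
 * datagram arrives with its payload padded to 46 bytes. Once the caller
 * has pulled to the network header, skb->len == 46 but skb_ip_totlen()
 * == 40, so nf_ct_skb_network_trim() drops the 6 trailing pad bytes via
 * pskb_trim_rcsum().
 */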
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
* value if 'skb' is freed.
*/
int nf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
u16 zone, u8 family, u8 *proto, u16 *mru)
{
int err;
if (family == NFPROTO_IPV4) {
enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
local_bh_disable();
err = ip_defrag(net, skb, user);
local_bh_enable();
if (err)
return err;
*mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
} else if (family == NFPROTO_IPV6) {
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
err = nf_ct_frag6_gather(net, skb, user);
if (err) {
if (err != -EINPROGRESS)
kfree_skb(skb);
return err;
}
*proto = ipv6_hdr(skb)->nexthdr;
*mru = IP6CB(skb)->frag_max_size;
#endif
} else {
kfree_skb(skb);
return -EPFNOSUPPORT;
}
skb_clear_hash(skb);
skb->ignore_df = 1;
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_handle_fragments);
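/* A minimal sketch, assuming a hypothetical IPv4 caller in the default
 * zone; it shows the calling convention described above.
 */
static int __maybe_unused example_defrag_ipv4(struct net *net,
					      struct sk_buff *skb)
{
	u16 mru = 0;
	u8 proto = 0;
	int err;

	err = nf_ct_handle_fragments(net, skb, 0, NFPROTO_IPV4,
				     &proto, &mru);
	if (err)
		return err; /* -EINPROGRESS: skb stolen; else already freed */

	/* skb now holds the reassembled datagram; mru records the largest
	 * fragment size seen, for later refragmentation.
	 */
	return 0;
}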
| linux-master | net/netfilter/nf_conntrack_ovs.c |
// SPDX-License-Identifier: GPL-2.0-only
/* SANE connection tracking helper
* (SANE = Scanner Access Now Easy)
* For documentation about the SANE network protocol see
* http://www.sane-project.org/html/doc015.html
*/
/* Copyright (C) 2007 Red Hat, Inc.
* Author: Michal Schmidt <[email protected]>
* Based on the FTP conntrack helper (net/netfilter/nf_conntrack_ftp.c):
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
* (C) 2003 Yasuyuki Kozakai @USAGI <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <linux/netfilter/nf_conntrack_sane.h>
#define HELPER_NAME "sane"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Schmidt <[email protected]>");
MODULE_DESCRIPTION("SANE connection tracking helper");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
#define MAX_PORTS 8
static u_int16_t ports[MAX_PORTS];
static unsigned int ports_c;
module_param_array(ports, ushort, &ports_c, 0400);
struct sane_request {
__be32 RPC_code;
#define SANE_NET_START 7 /* RPC code */
__be32 handle;
};
struct sane_reply_net_start {
__be32 status;
#define SANE_STATUS_SUCCESS 0
__be16 zero;
__be16 port;
/* other fields aren't interesting for conntrack */
};
static int help(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
unsigned int dataoff, datalen;
const struct tcphdr *th;
struct tcphdr _tcph;
int ret = NF_ACCEPT;
int dir = CTINFO2DIR(ctinfo);
struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
struct sane_reply_net_start *reply;
union {
struct sane_request req;
struct sane_reply_net_start repl;
} buf;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* Not a full tcp header? */
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return NF_ACCEPT;
/* No data? */
dataoff = protoff + th->doff * 4;
if (dataoff >= skb->len)
return NF_ACCEPT;
datalen = skb->len - dataoff;
if (dir == IP_CT_DIR_ORIGINAL) {
const struct sane_request *req;
if (datalen != sizeof(struct sane_request))
return NF_ACCEPT;
req = skb_header_pointer(skb, dataoff, datalen, &buf.req);
if (!req)
return NF_ACCEPT;
if (req->RPC_code != htonl(SANE_NET_START)) {
/* Not an interesting command */
WRITE_ONCE(ct_sane_info->state, SANE_STATE_NORMAL);
return NF_ACCEPT;
}
/* We're interested in the next reply */
WRITE_ONCE(ct_sane_info->state, SANE_STATE_START_REQUESTED);
return NF_ACCEPT;
}
/* IP_CT_DIR_REPLY */
/* Is it a reply to an uninteresting command? */
if (READ_ONCE(ct_sane_info->state) != SANE_STATE_START_REQUESTED)
return NF_ACCEPT;
/* It's a reply to SANE_NET_START. */
WRITE_ONCE(ct_sane_info->state, SANE_STATE_NORMAL);
if (datalen < sizeof(struct sane_reply_net_start)) {
pr_debug("NET_START reply too short\n");
return NF_ACCEPT;
}
datalen = sizeof(struct sane_reply_net_start);
reply = skb_header_pointer(skb, dataoff, datalen, &buf.repl);
if (!reply)
return NF_ACCEPT;
if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
/* saned refused the command */
pr_debug("unsuccessful SANE_STATUS = %u\n",
ntohl(reply->status));
return NF_ACCEPT;
}
/* Invalid saned reply? Ignore it. */
if (reply->zero != 0)
return NF_ACCEPT;
exp = nf_ct_expect_alloc(ct);
if (exp == NULL) {
nf_ct_helper_log(skb, ct, "cannot alloc expectation");
return NF_DROP;
}
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&tuple->src.u3, &tuple->dst.u3,
IPPROTO_TCP, NULL, &reply->port);
pr_debug("expect: ");
nf_ct_dump_tuple(&exp->tuple);
/* Can't expect this? Best to drop packet now. */
if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
}
nf_ct_expect_put(exp);
return ret;
}
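/* Wire-level sketch of the exchange tracked above; the port value 6566
 * is only an example:
 *
 *   client -> server:  { RPC_code = SANE_NET_START, handle }
 *   server -> client:  { status = SANE_STATUS_SUCCESS, zero = 0,
 *                        port = 6566 }
 *
 * after which an expectation for a TCP data connection to server:6566 is
 * registered, so that connection is tracked as RELATED.
 */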
static struct nf_conntrack_helper sane[MAX_PORTS * 2] __read_mostly;
static const struct nf_conntrack_expect_policy sane_exp_policy = {
.max_expected = 1,
.timeout = 5 * 60,
};
static void __exit nf_conntrack_sane_fini(void)
{
nf_conntrack_helpers_unregister(sane, ports_c * 2);
}
static int __init nf_conntrack_sane_init(void)
{
int i, ret = 0;
NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sane_master));
if (ports_c == 0)
ports[ports_c++] = SANE_PORT;
/* FIXME: should be configurable whether IPv4 and IPv6 connections
 * are tracked or not - YK
 */
for (i = 0; i < ports_c; i++) {
nf_ct_helper_init(&sane[2 * i], AF_INET, IPPROTO_TCP,
HELPER_NAME, SANE_PORT, ports[i], ports[i],
&sane_exp_policy, 0, help, NULL,
THIS_MODULE);
nf_ct_helper_init(&sane[2 * i + 1], AF_INET6, IPPROTO_TCP,
HELPER_NAME, SANE_PORT, ports[i], ports[i],
&sane_exp_policy, 0, help, NULL,
THIS_MODULE);
}
ret = nf_conntrack_helpers_register(sane, ports_c * 2);
if (ret < 0) {
pr_err("failed to register helpers\n");
return ret;
}
return 0;
}
module_init(nf_conntrack_sane_init);
module_exit(nf_conntrack_sane_fini);
| linux-master | net/netfilter/nf_conntrack_sane.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BER and PER decoding library for H.323 conntrack/NAT module.
*
* Copyright (c) 2006 by Jing Min Zhao <[email protected]>
*
* See nf_conntrack_helper_h323_asn1.h for details.
*/
#ifdef __KERNEL__
#include <linux/kernel.h>
#else
#include <stdio.h>
#endif
#include <linux/netfilter/nf_conntrack_h323_asn1.h>
/* Trace Flag */
#ifndef H323_TRACE
#define H323_TRACE 0
#endif
#if H323_TRACE
#define TAB_SIZE 4
#define IFTHEN(cond, act) if(cond){act;}
#ifdef __KERNEL__
#define PRINT printk
#else
#define PRINT printf
#endif
#define FNAME(name) name,
#else
#define IFTHEN(cond, act)
#define PRINT(fmt, args...)
#define FNAME(name)
#endif
/* ASN.1 Types */
#define NUL 0
#define BOOL 1
#define OID 2
#define INT 3
#define ENUM 4
#define BITSTR 5
#define NUMSTR 6
#define NUMDGT 6
#define TBCDSTR 6
#define OCTSTR 7
#define PRTSTR 7
#define IA5STR 7
#define GENSTR 7
#define BMPSTR 8
#define SEQ 9
#define SET 9
#define SEQOF 10
#define SETOF 10
#define CHOICE 11
/* Constraint Types */
#define FIXD 0
/* #define BITS 1-8 */
#define BYTE 9
#define WORD 10
#define CONS 11
#define SEMI 12
#define UNCO 13
/* ASN.1 Type Attributes */
#define SKIP 0
#define STOP 1
#define DECODE 2
#define EXT 4
#define OPEN 8
#define OPT 16
/* ASN.1 Field Structure */
typedef struct field_t {
#if H323_TRACE
char *name;
#endif
unsigned char type;
unsigned char sz;
unsigned char lb;
unsigned char ub;
unsigned short attr;
unsigned short offset;
const struct field_t *fields;
} field_t;
/* Bit Stream */
struct bitstr {
unsigned char *buf;
unsigned char *beg;
unsigned char *end;
unsigned char *cur;
unsigned int bit;
};
/* Tool Functions */
#define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;}
#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
static unsigned int get_len(struct bitstr *bs);
static unsigned int get_bit(struct bitstr *bs);
static unsigned int get_bits(struct bitstr *bs, unsigned int b);
static unsigned int get_bitmap(struct bitstr *bs, unsigned int b);
static unsigned int get_uint(struct bitstr *bs, int b);
/* Decoder Functions */
static int decode_nul(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_bool(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_oid(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_int(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_enum(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_bitstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_numstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_octstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_bmpstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_seq(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_seqof(struct bitstr *bs, const struct field_t *f, char *base, int level);
static int decode_choice(struct bitstr *bs, const struct field_t *f, char *base, int level);
/* Decoder Functions Vector */
typedef int (*decoder_t)(struct bitstr *, const struct field_t *, char *, int);
static const decoder_t Decoders[] = {
decode_nul,
decode_bool,
decode_oid,
decode_int,
decode_enum,
decode_bitstr,
decode_numstr,
decode_octstr,
decode_bmpstr,
decode_seq,
decode_seqof,
decode_choice,
};
/*
* H.323 Types
*/
#include "nf_conntrack_h323_types.c"
/*
* Functions
*/
/* Assume bs is aligned && v < 16384 */
static unsigned int get_len(struct bitstr *bs)
{
unsigned int v;
v = *bs->cur++;
if (v & 0x80) {
v &= 0x3f;
v <<= 8;
v += *bs->cur++;
}
return v;
}
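/* Worked example: a first byte below 0x80 is the length itself, otherwise
 * two bytes encode it with the top bits masked off:
 *   bytes 0x2a       -> 42
 *   bytes 0x81 0x23  -> ((0x81 & 0x3f) << 8) + 0x23 = 0x0123 = 291
 * consistent with the "v < 16384" assumption noted above.
 */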
static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
{
bits += bs->bit;
bytes += bits / BITS_PER_BYTE;
if (bits % BITS_PER_BYTE > 0)
bytes++;
if (bs->cur + bytes > bs->end)
return 1;
return 0;
}
static unsigned int get_bit(struct bitstr *bs)
{
unsigned int b = (*bs->cur) & (0x80 >> bs->bit);
INC_BIT(bs);
return b;
}
/* Assume b <= 8 */
static unsigned int get_bits(struct bitstr *bs, unsigned int b)
{
unsigned int v, l;
v = (*bs->cur) & (0xffU >> bs->bit);
l = b + bs->bit;
if (l < 8) {
v >>= 8 - l;
bs->bit = l;
} else if (l == 8) {
bs->cur++;
bs->bit = 0;
} else { /* l > 8 */
v <<= 8;
v += *(++bs->cur);
v >>= 16 - l;
bs->bit = l - 8;
}
return v;
}
/* Assume b <= 32 */
static unsigned int get_bitmap(struct bitstr *bs, unsigned int b)
{
unsigned int v, l, shift, bytes;
if (!b)
return 0;
l = bs->bit + b;
if (l < 8) {
v = (unsigned int)(*bs->cur) << (bs->bit + 24);
bs->bit = l;
} else if (l == 8) {
v = (unsigned int)(*bs->cur++) << (bs->bit + 24);
bs->bit = 0;
} else {
for (bytes = l >> 3, shift = 24, v = 0; bytes;
bytes--, shift -= 8)
v |= (unsigned int)(*bs->cur++) << shift;
if (l < 32) {
v |= (unsigned int)(*bs->cur) << shift;
v <<= bs->bit;
} else if (l > 32) {
v <<= bs->bit;
v |= (*bs->cur) >> (8 - bs->bit);
}
bs->bit = l & 0x7;
}
v &= 0xffffffff << (32 - b);
return v;
}
/*
* Assume bs is aligned and sizeof(unsigned int) == 4
*/
static unsigned int get_uint(struct bitstr *bs, int b)
{
unsigned int v = 0;
switch (b) {
case 4:
v |= *bs->cur++;
v <<= 8;
fallthrough;
case 3:
v |= *bs->cur++;
v <<= 8;
fallthrough;
case 2:
v |= *bs->cur++;
v <<= 8;
fallthrough;
case 1:
v |= *bs->cur++;
break;
}
return v;
}
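/* Worked example: get_uint() assembles a big-endian integer of 1..4
 * bytes through the fallthrough chain, e.g. b = 2 over the bytes
 * 0x01 0x02 yields (0x01 << 8) | 0x02 = 258.
 */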
static int decode_nul(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
return H323_ERROR_NONE;
}
static int decode_bool(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
INC_BIT(bs);
if (nf_h323_error_boundary(bs, 0, 0))
return H323_ERROR_BOUND;
return H323_ERROR_NONE;
}
static int decode_oid(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
int len;
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 1, 0))
return H323_ERROR_BOUND;
len = *bs->cur++;
bs->cur += len;
if (nf_h323_error_boundary(bs, 0, 0))
return H323_ERROR_BOUND;
return H323_ERROR_NONE;
}
static int decode_int(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
PRINT("%*.s%s", level * TAB_SIZE, " ", f->name);
switch (f->sz) {
case BYTE: /* Range == 256 */
BYTE_ALIGN(bs);
bs->cur++;
break;
case WORD: /* 257 <= Range <= 64K */
BYTE_ALIGN(bs);
bs->cur += 2;
break;
case CONS: /* 64K < Range < 4G */
if (nf_h323_error_boundary(bs, 0, 2))
return H323_ERROR_BOUND;
len = get_bits(bs, 2) + 1;
BYTE_ALIGN(bs);
if (base && (f->attr & DECODE)) { /* timeToLive */
unsigned int v = get_uint(bs, len) + f->lb;
PRINT(" = %u", v);
*((unsigned int *)(base + f->offset)) = v;
}
bs->cur += len;
break;
case UNCO:
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs);
bs->cur += len;
break;
default: /* 2 <= Range <= 255 */
INC_BITS(bs, f->sz);
break;
}
PRINT("\n");
if (nf_h323_error_boundary(bs, 0, 0))
return H323_ERROR_BOUND;
return H323_ERROR_NONE;
}
static int decode_enum(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
if ((f->attr & EXT) && get_bit(bs)) {
INC_BITS(bs, 7);
} else {
INC_BITS(bs, f->sz);
}
if (nf_h323_error_boundary(bs, 0, 0))
return H323_ERROR_BOUND;
return H323_ERROR_NONE;
}
static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
BYTE_ALIGN(bs);
switch (f->sz) {
case FIXD: /* fixed length > 16 */
len = f->lb;
break;
case WORD: /* 2-byte length */
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = (*bs->cur++) << 8;
len += (*bs->cur++) + f->lb;
break;
case SEMI:
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs);
break;
default:
len = 0;
break;
}
bs->cur += len >> 3;
bs->bit = len & 7;
if (nf_h323_error_boundary(bs, 0, 0))
return H323_ERROR_BOUND;
return H323_ERROR_NONE;
}
static int decode_numstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
/* 2 <= Range <= 255 */
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
len = get_bits(bs, f->sz) + f->lb;
BYTE_ALIGN(bs);
INC_BITS(bs, (len << 2));
if (nf_h323_error_boundary(bs, 0, 0))
return H323_ERROR_BOUND;
return H323_ERROR_NONE;
}
static int decode_octstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
PRINT("%*.s%s", level * TAB_SIZE, " ", f->name);
switch (f->sz) {
case FIXD: /* Range == 1 */
if (f->lb > 2) {
BYTE_ALIGN(bs);
if (base && (f->attr & DECODE)) {
/* The IP Address */
IFTHEN(f->lb == 4,
PRINT(" = %d.%d.%d.%d:%d",
bs->cur[0], bs->cur[1],
bs->cur[2], bs->cur[3],
bs->cur[4] * 256 + bs->cur[5]));
*((unsigned int *)(base + f->offset)) =
bs->cur - bs->buf;
}
}
len = f->lb;
break;
case BYTE: /* Range == 256 */
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 1, 0))
return H323_ERROR_BOUND;
len = (*bs->cur++) + f->lb;
break;
case SEMI:
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs) + f->lb;
break;
default: /* 2 <= Range <= 255 */
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
len = get_bits(bs, f->sz) + f->lb;
BYTE_ALIGN(bs);
break;
}
bs->cur += len;
PRINT("\n");
if (nf_h323_error_boundary(bs, 0, 0))
return H323_ERROR_BOUND;
return H323_ERROR_NONE;
}
static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int len;
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
switch (f->sz) {
case BYTE: /* Range == 256 */
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 1, 0))
return H323_ERROR_BOUND;
len = (*bs->cur++) + f->lb;
break;
default: /* 2 <= Range <= 255 */
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
len = get_bits(bs, f->sz) + f->lb;
BYTE_ALIGN(bs);
break;
}
bs->cur += len << 1;
if (nf_h323_error_boundary(bs, 0, 0))
return H323_ERROR_BOUND;
return H323_ERROR_NONE;
}
static int decode_seq(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int ext, bmp, i, opt, len = 0, bmp2, bmp2_len;
int err;
const struct field_t *son;
unsigned char *beg = NULL;
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
/* Decode? */
base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
/* Extensible? */
if (nf_h323_error_boundary(bs, 0, 1))
return H323_ERROR_BOUND;
ext = (f->attr & EXT) ? get_bit(bs) : 0;
/* Get fields bitmap */
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
bmp = get_bitmap(bs, f->sz);
if (base)
*(unsigned int *)base = bmp;
/* Decode the root components */
for (i = opt = 0, son = f->fields; i < f->lb; i++, son++) {
if (son->attr & STOP) {
PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
son->name);
return H323_ERROR_STOP;
}
if (son->attr & OPT) { /* Optional component */
if (!((0x80000000U >> (opt++)) & bmp)) /* Not exist */
continue;
}
/* Decode */
if (son->attr & OPEN) { /* Open field */
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs);
if (nf_h323_error_boundary(bs, len, 0))
return H323_ERROR_BOUND;
if (!base || !(son->attr & DECODE)) {
PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
" ", son->name);
bs->cur += len;
continue;
}
beg = bs->cur;
/* Decode */
if ((err = (Decoders[son->type]) (bs, son, base,
level + 1)) <
H323_ERROR_NONE)
return err;
bs->cur = beg + len;
bs->bit = 0;
} else if ((err = (Decoders[son->type]) (bs, son, base,
level + 1)) <
H323_ERROR_NONE)
return err;
}
/* No extension? */
if (!ext)
return H323_ERROR_NONE;
/* Get the extension bitmap */
if (nf_h323_error_boundary(bs, 0, 7))
return H323_ERROR_BOUND;
bmp2_len = get_bits(bs, 7) + 1;
if (nf_h323_error_boundary(bs, 0, bmp2_len))
return H323_ERROR_BOUND;
bmp2 = get_bitmap(bs, bmp2_len);
bmp |= bmp2 >> f->sz;
if (base)
*(unsigned int *)base = bmp;
BYTE_ALIGN(bs);
/* Decode the extension components */
for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
/* Check Range */
if (i >= f->ub) { /* Newer Version? */
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs);
if (nf_h323_error_boundary(bs, len, 0))
return H323_ERROR_BOUND;
bs->cur += len;
continue;
}
if (son->attr & STOP) {
PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
son->name);
return H323_ERROR_STOP;
}
if (!((0x80000000 >> opt) & bmp2)) /* Not present */
continue;
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs);
if (nf_h323_error_boundary(bs, len, 0))
return H323_ERROR_BOUND;
if (!base || !(son->attr & DECODE)) {
PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
son->name);
bs->cur += len;
continue;
}
beg = bs->cur;
		err = (Decoders[son->type])(bs, son, base, level + 1);
		if (err < H323_ERROR_NONE)
			return err;
bs->cur = beg + len;
bs->bit = 0;
}
return H323_ERROR_NONE;
}
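/* Bitmap handling sketch (assumed values, not from any real PDU): for a
 * SEQUENCE with f->sz == 3 optional root components and a 2-bit extension
 * bitmap, the merge above works out as:
 *
 *	bmp  = get_bitmap(bs, 3);	bits 101  ->  0xA0000000
 *	bmp2 = get_bitmap(bs, 2);	bits 11   ->  0xC0000000
 *	bmp |= bmp2 >> 3;		result        0xB8000000
 *
 * so root presence bits stay in the top f->sz positions and extension
 * presence bits follow directly after them in the stored word.
 */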
static int decode_seqof(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int count, effective_count = 0, i, len = 0;
int err;
const struct field_t *son;
unsigned char *beg = NULL;
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
/* Decode? */
base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
/* Decode item count */
switch (f->sz) {
case BYTE:
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 1, 0))
return H323_ERROR_BOUND;
count = *bs->cur++;
break;
case WORD:
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
count = *bs->cur++;
count <<= 8;
count += *bs->cur++;
break;
case SEMI:
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
count = get_len(bs);
break;
default:
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
count = get_bits(bs, f->sz);
break;
}
count += f->lb;
/* Write Count */
if (base) {
effective_count = count > f->ub ? f->ub : count;
*(unsigned int *)base = effective_count;
base += sizeof(unsigned int);
}
/* Decode nested field */
son = f->fields;
if (base)
base -= son->offset;
for (i = 0; i < count; i++) {
if (son->attr & OPEN) {
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs);
if (nf_h323_error_boundary(bs, len, 0))
return H323_ERROR_BOUND;
if (!base || !(son->attr & DECODE)) {
PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
" ", son->name);
bs->cur += len;
continue;
}
beg = bs->cur;
			err = (Decoders[son->type])(bs, son,
						    i < effective_count ?
						    base : NULL,
						    level + 1);
			if (err < H323_ERROR_NONE)
				return err;
			bs->cur = beg + len;
			bs->bit = 0;
		} else {
			err = (Decoders[son->type])(bs, son,
						    i < effective_count ?
						    base : NULL,
						    level + 1);
			if (err < H323_ERROR_NONE)
				return err;
		}
if (base)
base += son->offset;
}
return H323_ERROR_NONE;
}
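/* Item-count encodings handled above, keyed by f->sz (illustrative):
 *
 *	BYTE: one aligned octet,   0x05       ->  count = 5 + f->lb
 *	WORD: two aligned octets,  0x01 0x00  ->  count = 256 + f->lb
 *	SEMI: aligned get_len() of one or two octets
 *	else: f->sz unaligned bits straight from the stream
 *
 * Only the first f->ub items are decoded into "base"; any surplus items
 * are still parsed (with a NULL base) so the cursor ends up past the
 * whole SEQUENCE OF.
 */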
static int decode_choice(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
unsigned int type, ext, len = 0;
int err;
const struct field_t *son;
unsigned char *beg = NULL;
PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
/* Decode? */
base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
/* Decode the choice index number */
if (nf_h323_error_boundary(bs, 0, 1))
return H323_ERROR_BOUND;
if ((f->attr & EXT) && get_bit(bs)) {
ext = 1;
if (nf_h323_error_boundary(bs, 0, 7))
return H323_ERROR_BOUND;
type = get_bits(bs, 7) + f->lb;
} else {
ext = 0;
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
type = get_bits(bs, f->sz);
if (type >= f->lb)
return H323_ERROR_RANGE;
}
/* Write Type */
if (base)
*(unsigned int *)base = type;
/* Check Range */
if (type >= f->ub) { /* Newer version? */
BYTE_ALIGN(bs);
if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs);
if (nf_h323_error_boundary(bs, len, 0))
return H323_ERROR_BOUND;
bs->cur += len;
return H323_ERROR_NONE;
}
/* Transfer to son level */
son = &f->fields[type];
if (son->attr & STOP) {
PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name);
return H323_ERROR_STOP;
}
if (ext || (son->attr & OPEN)) {
BYTE_ALIGN(bs);
		if (nf_h323_error_boundary(bs, 2, 0))
return H323_ERROR_BOUND;
len = get_len(bs);
if (nf_h323_error_boundary(bs, len, 0))
return H323_ERROR_BOUND;
if (!base || !(son->attr & DECODE)) {
PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
son->name);
bs->cur += len;
return H323_ERROR_NONE;
}
beg = bs->cur;
		err = (Decoders[son->type])(bs, son, base, level + 1);
		if (err < H323_ERROR_NONE)
			return err;
		bs->cur = beg + len;
		bs->bit = 0;
	} else {
		err = (Decoders[son->type])(bs, son, base, level + 1);
		if (err < H323_ERROR_NONE)
			return err;
	}
return H323_ERROR_NONE;
}
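/* Choice index layout (illustrative): with f->sz == 2 and f->lb == 4 root
 * alternatives, a root alternative is selected by two unaligned bits:
 *
 *	ext bit 0, index bits 10        ->  type = 2
 *
 * while an extension alternative sets the ext bit and uses a 7-bit index
 * biased by the number of root alternatives:
 *
 *	ext bit 1, 7 bits 0000001       ->  type = 1 + f->lb = 5
 *
 * Indices at or above f->ub come from a newer protocol version, so the
 * open-type length is read and the alternative is skipped unparsed.
 */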
int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
{
static const struct field_t ras_message = {
FNAME("RasMessage") CHOICE, 5, 24, 32, DECODE | EXT,
0, _RasMessage
};
struct bitstr bs;
bs.buf = bs.beg = bs.cur = buf;
bs.end = buf + sz;
bs.bit = 0;
return decode_choice(&bs, &ras_message, (char *) ras, 0);
}
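/* Minimal usage sketch (assumed caller, mirroring what the H.323
 * conntrack helper does with a RAS UDP payload; "data"/"datalen" are
 * hypothetical names for the payload pointer and length):
 *
 *	RasMessage ras;
 *
 *	if (DecodeRasMessage(data, datalen, &ras) < 0)
 *		return NF_ACCEPT;	(undecodable, leave packet alone)
 *
 * On success the generated RasMessage type records which alternative was
 * selected, which the helper then switches on.
 */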
static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
size_t sz, H323_UserInformation *uuie)
{
static const struct field_t h323_userinformation = {
FNAME("H323-UserInformation") SEQ, 1, 2, 2, DECODE | EXT,
0, _H323_UserInformation
};
struct bitstr bs;
bs.buf = buf;
bs.beg = bs.cur = beg;
bs.end = beg + sz;
bs.bit = 0;
return decode_seq(&bs, &h323_userinformation, (char *) uuie, 0);
}
int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
MultimediaSystemControlMessage *
mscm)
{
static const struct field_t multimediasystemcontrolmessage = {
FNAME("MultimediaSystemControlMessage") CHOICE, 2, 4, 4,
DECODE | EXT, 0, _MultimediaSystemControlMessage
};
struct bitstr bs;
bs.buf = bs.beg = bs.cur = buf;
bs.end = buf + sz;
bs.bit = 0;
return decode_choice(&bs, &multimediasystemcontrolmessage,
(char *) mscm, 0);
}
int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931)
{
unsigned char *p = buf;
int len;
if (!p || sz < 1)
return H323_ERROR_BOUND;
/* Protocol Discriminator */
if (*p != 0x08) {
PRINT("Unknown Protocol Discriminator\n");
return H323_ERROR_RANGE;
}
p++;
sz--;
/* CallReferenceValue */
if (sz < 1)
return H323_ERROR_BOUND;
len = *p++;
sz--;
if (sz < len)
return H323_ERROR_BOUND;
p += len;
sz -= len;
/* Message Type */
if (sz < 2)
return H323_ERROR_BOUND;
q931->MessageType = *p++;
sz--;
PRINT("MessageType = %02X\n", q931->MessageType);
if (*p & 0x80) {
p++;
sz--;
}
/* Decode Information Elements */
while (sz > 0) {
if (*p == 0x7e) { /* UserUserIE */
if (sz < 3)
break;
p++;
len = *p++ << 8;
len |= *p++;
sz -= 3;
if (sz < len)
break;
p++;
len--;
return DecodeH323_UserInformation(buf, p, len,
&q931->UUIE);
}
p++;
sz--;
if (sz < 1)
break;
len = *p++;
sz--;
if (sz < len)
break;
p += len;
sz -= len;
}
PRINT("Q.931 UUIE not found\n");
return H323_ERROR_BOUND;
}
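/* Q.931 layout walked by DecodeQ931() (illustrative octets):
 *
 *	08		protocol discriminator (Q.931)
 *	02 12 34	call reference: length 2, value 0x1234
 *	05		message type: SETUP (top bit clear)
 *	7e 00 0b ...	user-user IE: tag 0x7e, 16-bit length, then one
 *			discriminator octet followed by the PER-encoded
 *			H323-UserInformation
 *
 * Any other IE is a tag byte plus an 8-bit length and is skipped until
 * the UUIE is found; a message without a UUIE returns H323_ERROR_BOUND.
 */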
| linux-master | net/netfilter/nf_conntrack_h323_asn1.c |
// SPDX-License-Identifier: GPL-2.0-only
/* NAT support functions shared by the OVS and TC conntrack actions. */
#include <net/netfilter/nf_nat.h>
/* Modelled after nf_nat_ipv[46]_fn().
* range is only used for new, uninitialized NAT state.
* Returns either NF_ACCEPT or NF_DROP.
*/
static int nf_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int *action,
const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype)
{
__be16 proto = skb_protocol(skb, true);
int hooknum, err = NF_ACCEPT;
/* See HOOK2MANIP(). */
if (maniptype == NF_NAT_MANIP_SRC)
hooknum = NF_INET_LOCAL_IN; /* Source NAT */
else
hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */
switch (ctinfo) {
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
if (proto == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
hooknum))
err = NF_DROP;
goto out;
} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
__be16 frag_off;
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
int hdrlen = ipv6_skip_exthdr(skb,
sizeof(struct ipv6hdr),
&nexthdr, &frag_off);
if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
if (!nf_nat_icmpv6_reply_translation(skb, ct,
ctinfo,
hooknum,
hdrlen))
err = NF_DROP;
goto out;
}
}
		/* Non-ICMP, fall through to initialize if needed. */
fallthrough;
case IP_CT_NEW:
/* Seen it before? This can happen for loopback, retrans,
* or local packets.
*/
if (!nf_nat_initialized(ct, maniptype)) {
/* Initialize according to the NAT action. */
err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
/* Action is set up to establish a new
* mapping.
*/
? nf_nat_setup_info(ct, range, maniptype)
: nf_nat_alloc_null_binding(ct, hooknum);
if (err != NF_ACCEPT)
goto out;
}
break;
case IP_CT_ESTABLISHED:
case IP_CT_ESTABLISHED_REPLY:
break;
default:
err = NF_DROP;
goto out;
}
err = nf_nat_packet(ct, ctinfo, hooknum, skb);
if (err == NF_ACCEPT)
*action |= BIT(maniptype);
out:
return err;
}
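/* Worked example (illustrative): for an OVS SNAT action on a NEW
 * connection, the caller passes maniptype == NF_NAT_MANIP_SRC and a range
 * with NF_NAT_RANGE_MAP_IPS set. The switch above lands in the IP_CT_NEW
 * case, nf_nat_setup_info() installs the mapping, and nf_nat_packet()
 * rewrites the packet; *action then has BIT(NF_NAT_MANIP_SRC) set so the
 * caller knows source NAT was applied.
 */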
int nf_ct_nat(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int *action,
const struct nf_nat_range2 *range, bool commit)
{
enum nf_nat_manip_type maniptype;
int err, ct_action = *action;
*action = 0;
/* Add NAT extension if not confirmed yet. */
if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
return NF_DROP; /* Can't NAT. */
if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
(ctinfo != IP_CT_RELATED || commit)) {
/* NAT an established or related connection like before. */
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
/* This is the REPLY direction for a connection
* for which NAT was applied in the forward
* direction. Do the reverse NAT.
*/
maniptype = ct->status & IPS_SRC_NAT
? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
else
maniptype = ct->status & IPS_SRC_NAT
? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
} else if (ct_action & BIT(NF_NAT_MANIP_SRC)) {
maniptype = NF_NAT_MANIP_SRC;
} else if (ct_action & BIT(NF_NAT_MANIP_DST)) {
maniptype = NF_NAT_MANIP_DST;
} else {
return NF_ACCEPT;
}
err = nf_ct_nat_execute(skb, ct, ctinfo, action, range, maniptype);
if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
if (ct->status & IPS_SRC_NAT) {
if (maniptype == NF_NAT_MANIP_SRC)
maniptype = NF_NAT_MANIP_DST;
else
maniptype = NF_NAT_MANIP_SRC;
err = nf_ct_nat_execute(skb, ct, ctinfo, action, range,
maniptype);
} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
err = nf_ct_nat_execute(skb, ct, ctinfo, action, NULL,
NF_NAT_MANIP_SRC);
}
}
return err;
}
EXPORT_SYMBOL_GPL(nf_ct_nat);
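/* Caller-side sketch (assumed, modelled on the OVS/TC users): commit a
 * connection with SNAT to a fixed address.
 *
 *	struct nf_nat_range2 range = {};
 *	int action = BIT(NF_NAT_MANIP_SRC);
 *
 *	range.flags = NF_NAT_RANGE_MAP_IPS;
 *	range.min_addr.ip = range.max_addr.ip = htonl(0xc0a80001);
 *
 *	err = nf_ct_nat(skb, ct, ctinfo, &action, &range, true);
 *	(on NF_ACCEPT, "action" reports which manips were applied)
 *
 * The real callers derive "range" from the OVS_CT_ATTR_NAT netlink
 * attributes or from the TC ct action parameters.
 */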
| linux-master | net/netfilter/nf_nat_ovs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This is a module which is used for logging packets.
*/
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_LOG.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/nf_log.h>
static unsigned int
log_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_log_info *loginfo = par->targinfo;
struct net *net = xt_net(par);
struct nf_loginfo li;
li.type = NF_LOG_TYPE_LOG;
li.u.log.level = loginfo->level;
li.u.log.logflags = loginfo->logflags;
nf_log_packet(net, xt_family(par), xt_hooknum(par), skb, xt_in(par),
xt_out(par), &li, "%s", loginfo->prefix);
return XT_CONTINUE;
}
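/* Usage sketch from userspace (illustrative rule, not part of this
 * module): matching packets are handed to the nf_log backend with the
 * configured prefix and syslog level, and rule traversal continues
 * because the target returns XT_CONTINUE:
 *
 *	iptables -A INPUT -p tcp --dport 22 \
 *		-j LOG --log-prefix "ssh: " --log-level info
 */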
static int log_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_log_info *loginfo = par->targinfo;
int ret;
if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
return -EINVAL;
if (loginfo->level >= 8) {
pr_debug("level %u >= 8\n", loginfo->level);
return -EINVAL;
}
if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
pr_debug("prefix is not null-terminated\n");
return -EINVAL;
}
ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
if (ret != 0 && !par->nft_compat) {
request_module("%s", "nf_log_syslog");
ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
}
return ret;
}
static void log_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_logger_put(par->family, NF_LOG_TYPE_LOG);
}
static struct xt_target log_tg_regs[] __read_mostly = {
{
.name = "LOG",
.family = NFPROTO_IPV4,
.target = log_tg,
.targetsize = sizeof(struct xt_log_info),
.checkentry = log_tg_check,
.destroy = log_tg_destroy,
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "LOG",
.family = NFPROTO_IPV6,
.target = log_tg,
.targetsize = sizeof(struct xt_log_info),
.checkentry = log_tg_check,
.destroy = log_tg_destroy,
.me = THIS_MODULE,
},
#endif
};
static int __init log_tg_init(void)
{
return xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
}
static void __exit log_tg_exit(void)
{
xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
}
module_init(log_tg_init);
module_exit(log_tg_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_AUTHOR("Jan Rekorajski <[email protected]>");
MODULE_DESCRIPTION("Xtables: IPv4/IPv6 packet logging");
MODULE_ALIAS("ipt_LOG");
MODULE_ALIAS("ip6t_LOG");
MODULE_SOFTDEP("pre: nf_log_syslog");
| linux-master | net/netfilter/xt_LOG.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Michal Ludvig <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netfilter/xt_pkttype.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig <[email protected]>");
MODULE_DESCRIPTION("Xtables: link layer packet type match");
MODULE_ALIAS("ipt_pkttype");
MODULE_ALIAS("ip6t_pkttype");
static bool
pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_pkttype_info *info = par->matchinfo;
u_int8_t type;
if (skb->pkt_type != PACKET_LOOPBACK)
type = skb->pkt_type;
else if (xt_family(par) == NFPROTO_IPV4 &&
ipv4_is_multicast(ip_hdr(skb)->daddr))
type = PACKET_MULTICAST;
else if (xt_family(par) == NFPROTO_IPV6)
type = PACKET_MULTICAST;
else
type = PACKET_BROADCAST;
return (type == info->pkttype) ^ info->invert;
}
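/* Usage sketch (illustrative): match link-layer broadcasts, relying on
 * the loopback fix-up above so that a looped-back multicast packet still
 * counts as multicast rather than PACKET_LOOPBACK:
 *
 *	iptables -A INPUT -m pkttype --pkt-type broadcast -j DROP
 */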
static struct xt_match pkttype_mt_reg __read_mostly = {
.name = "pkttype",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = pkttype_mt,
.matchsize = sizeof(struct xt_pkttype_info),
.me = THIS_MODULE,
};
static int __init pkttype_mt_init(void)
{
return xt_register_match(&pkttype_mt_reg);
}
static void __exit pkttype_mt_exit(void)
{
xt_unregister_match(&pkttype_mt_reg);
}
module_init(pkttype_mt_init);
module_exit(pkttype_mt_exit);
| linux-master | net/netfilter/xt_pkttype.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xt_LED.c - netfilter target to make LEDs blink upon packet matches
*
* Copyright (C) 2008 Adam Nielsen <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/mutex.h>
#include <linux/netfilter/xt_LED.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adam Nielsen <[email protected]>");
MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
MODULE_ALIAS("ipt_LED");
MODULE_ALIAS("ip6t_LED");
static LIST_HEAD(xt_led_triggers);
static DEFINE_MUTEX(xt_led_mutex);
/*
* This is declared in here (the kernel module) only, to avoid having these
* dependencies in userspace code. This is what xt_led_info.internal_data
* points to.
*/
struct xt_led_info_internal {
struct list_head list;
int refcnt;
char *trigger_id;
struct led_trigger netfilter_led_trigger;
struct timer_list timer;
};
#define XT_LED_BLINK_DELAY 50 /* ms */
static unsigned int
led_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
/*
* If "always blink" is enabled, and there's still some time until the
* LED will switch off, briefly switch it off now.
*/
if ((ledinfo->delay > 0) && ledinfo->always_blink &&
timer_pending(&ledinternal->timer))
led_trigger_blink_oneshot(&ledinternal->netfilter_led_trigger,
XT_LED_BLINK_DELAY, XT_LED_BLINK_DELAY, 1);
else
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
/* If there's a positive delay, start/update the timer */
if (ledinfo->delay > 0) {
mod_timer(&ledinternal->timer,
jiffies + msecs_to_jiffies(ledinfo->delay));
/* Otherwise if there was no delay given, blink as fast as possible */
} else if (ledinfo->delay == 0) {
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
}
/* else the delay is negative, which means switch on and stay on */
return XT_CONTINUE;
}
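/* Usage sketch (illustrative; the LED name is a placeholder and the
 * "netfilter-" prefix is added by the iptables userspace extension):
 * blink an LED for one second per matching packet by registering a
 * trigger and attaching a LED class device to it via sysfs:
 *
 *	iptables -t mangle -A PREROUTING -p tcp --dport 22 \
 *		-j LED --led-trigger-id ssh --led-delay 1000
 *	echo netfilter-ssh > /sys/class/leds/ssh:green/trigger
 */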
static void led_timeout_callback(struct timer_list *t)
{
struct xt_led_info_internal *ledinternal = from_timer(ledinternal, t,
timer);
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
}
static struct xt_led_info_internal *led_trigger_lookup(const char *name)
{
struct xt_led_info_internal *ledinternal;
list_for_each_entry(ledinternal, &xt_led_triggers, list) {
if (!strcmp(name, ledinternal->netfilter_led_trigger.name)) {
return ledinternal;
}
}
return NULL;
}
static int led_tg_check(const struct xt_tgchk_param *par)
{
struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal;
int err;
if (ledinfo->id[0] == '\0')
return -EINVAL;
mutex_lock(&xt_led_mutex);
ledinternal = led_trigger_lookup(ledinfo->id);
if (ledinternal) {
ledinternal->refcnt++;
goto out;
}
err = -ENOMEM;
ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL);
if (!ledinternal)
goto exit_mutex_only;
ledinternal->trigger_id = kstrdup(ledinfo->id, GFP_KERNEL);
if (!ledinternal->trigger_id)
goto exit_internal_alloc;
ledinternal->refcnt = 1;
ledinternal->netfilter_led_trigger.name = ledinternal->trigger_id;
err = led_trigger_register(&ledinternal->netfilter_led_trigger);
if (err) {
pr_info_ratelimited("Trigger name is already in use.\n");
goto exit_alloc;
}
	/* Since the ledinternal timer can be shared between multiple targets,
	 * always set it up, even if the current target does not need it.
	 */
timer_setup(&ledinternal->timer, led_timeout_callback, 0);
list_add_tail(&ledinternal->list, &xt_led_triggers);
out:
mutex_unlock(&xt_led_mutex);
ledinfo->internal_data = ledinternal;
return 0;
exit_alloc:
kfree(ledinternal->trigger_id);
exit_internal_alloc:
kfree(ledinternal);
exit_mutex_only:
mutex_unlock(&xt_led_mutex);
return err;
}
static void led_tg_destroy(const struct xt_tgdtor_param *par)
{
const struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
mutex_lock(&xt_led_mutex);
if (--ledinternal->refcnt) {
mutex_unlock(&xt_led_mutex);
return;
}
list_del(&ledinternal->list);
timer_shutdown_sync(&ledinternal->timer);
led_trigger_unregister(&ledinternal->netfilter_led_trigger);
mutex_unlock(&xt_led_mutex);
kfree(ledinternal->trigger_id);
kfree(ledinternal);
}
static struct xt_target led_tg_reg __read_mostly = {
.name = "LED",
.revision = 0,
.family = NFPROTO_UNSPEC,
.target = led_tg,
.targetsize = sizeof(struct xt_led_info),
.usersize = offsetof(struct xt_led_info, internal_data),
.checkentry = led_tg_check,
.destroy = led_tg_destroy,
.me = THIS_MODULE,
};
static int __init led_tg_init(void)
{
return xt_register_target(&led_tg_reg);
}
static void __exit led_tg_exit(void)
{
xt_unregister_target(&led_tg_reg);
}
module_init(led_tg_init);
module_exit(led_tg_exit);
| linux-master | net/netfilter/xt_LED.c |
/* Netfilter messages via netlink socket. Allows for user space
* protocol helpers and general trouble making from userspace.
*
* (C) 2001 by Jay Schulist <[email protected]>,
* (C) 2002-2005 by Harald Welte <[email protected]>
* (C) 2005-2017 by Pablo Neira Ayuso <[email protected]>
*
* Initial netfilter messages via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)
*
* Further development of this code funded by Astaro AG (http://www.astaro.com)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <net/netlink.h>
#include <net/netns/generic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
MODULE_DESCRIPTION("Netfilter messages via netlink socket");
#define nfnl_dereference_protected(id) \
rcu_dereference_protected(table[(id)].subsys, \
lockdep_nfnl_is_held((id)))
#define NFNL_MAX_ATTR_COUNT 32
static unsigned int nfnetlink_pernet_id __read_mostly;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static DEFINE_SPINLOCK(nfnl_grp_active_lock);
#endif
struct nfnl_net {
struct sock *nfnl;
};
static struct {
struct mutex mutex;
const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];
static struct lock_class_key nfnl_lockdep_keys[NFNL_SUBSYS_COUNT];
static const char *const nfnl_lockdep_names[NFNL_SUBSYS_COUNT] = {
[NFNL_SUBSYS_NONE] = "nfnl_subsys_none",
[NFNL_SUBSYS_CTNETLINK] = "nfnl_subsys_ctnetlink",
[NFNL_SUBSYS_CTNETLINK_EXP] = "nfnl_subsys_ctnetlink_exp",
[NFNL_SUBSYS_QUEUE] = "nfnl_subsys_queue",
[NFNL_SUBSYS_ULOG] = "nfnl_subsys_ulog",
[NFNL_SUBSYS_OSF] = "nfnl_subsys_osf",
[NFNL_SUBSYS_IPSET] = "nfnl_subsys_ipset",
[NFNL_SUBSYS_ACCT] = "nfnl_subsys_acct",
[NFNL_SUBSYS_CTNETLINK_TIMEOUT] = "nfnl_subsys_cttimeout",
[NFNL_SUBSYS_CTHELPER] = "nfnl_subsys_cthelper",
[NFNL_SUBSYS_NFTABLES] = "nfnl_subsys_nftables",
[NFNL_SUBSYS_NFT_COMPAT] = "nfnl_subsys_nftcompat",
[NFNL_SUBSYS_HOOK] = "nfnl_subsys_hook",
};
static const int nfnl_group2type[NFNLGRP_MAX+1] = {
[NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
[NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
[NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK,
[NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
[NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
[NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
[NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES,
[NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT,
[NFNLGRP_NFTRACE] = NFNL_SUBSYS_NFTABLES,
};
static struct nfnl_net *nfnl_pernet(struct net *net)
{
return net_generic(net, nfnetlink_pernet_id);
}
void nfnl_lock(__u8 subsys_id)
{
mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);
void nfnl_unlock(__u8 subsys_id)
{
mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);
#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
u8 cb_id;
/* Sanity-check attr_count size to avoid stack buffer overflow. */
for (cb_id = 0; cb_id < n->cb_count; cb_id++)
if (WARN_ON(n->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT))
return -EINVAL;
nfnl_lock(n->subsys_id);
if (table[n->subsys_id].subsys) {
nfnl_unlock(n->subsys_id);
return -EBUSY;
}
rcu_assign_pointer(table[n->subsys_id].subsys, n);
nfnl_unlock(n->subsys_id);
return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);
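/* Registration sketch (assumed subsystem, for illustration only): a
 * subsystem provides one nfnl_callback per message type and registers
 * under its NFNL_SUBSYS_* id; nfnetlink_rcv_msg() then dispatches by
 * NFNL_SUBSYS_ID()/NFNL_MSG_TYPE() of nlmsg_type. "demo_get",
 * "demo_policy" and DEMO_ATTR_MAX are hypothetical names:
 *
 *	static const struct nfnl_callback demo_cb[] = {
 *		[0] = {
 *			.call		= demo_get,
 *			.type		= NFNL_CB_RCU,
 *			.attr_count	= DEMO_ATTR_MAX,
 *			.policy		= demo_policy,
 *		},
 *	};
 *	static const struct nfnetlink_subsystem demo_subsys = {
 *		.name		= "demo",
 *		.subsys_id	= NFNL_SUBSYS_NONE,	(placeholder id)
 *		.cb_count	= ARRAY_SIZE(demo_cb),
 *		.cb		= demo_cb,
 *	};
 *	err = nfnetlink_subsys_register(&demo_subsys);
 */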
int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
nfnl_lock(n->subsys_id);
table[n->subsys_id].subsys = NULL;
nfnl_unlock(n->subsys_id);
synchronize_rcu();
return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);
static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
u8 subsys_id = NFNL_SUBSYS_ID(type);
if (subsys_id >= NFNL_SUBSYS_COUNT)
return NULL;
return rcu_dereference(table[subsys_id].subsys);
}
static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
u8 cb_id = NFNL_MSG_TYPE(type);
if (cb_id >= ss->cb_count)
return NULL;
return &ss->cb[cb_id];
}
int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
struct nfnl_net *nfnlnet = nfnl_pernet(net);
return netlink_has_listeners(nfnlnet->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags)
{
struct nfnl_net *nfnlnet = nfnl_pernet(net);
return nlmsg_notify(nfnlnet->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
struct nfnl_net *nfnlnet = nfnl_pernet(net);
return netlink_set_err(nfnlnet->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);
int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
struct nfnl_net *nfnlnet = nfnl_pernet(net);
int err;
err = nlmsg_unicast(nfnlnet->nfnl, skb, portid);
if (err == -EAGAIN)
err = -ENOBUFS;
return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);
void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation)
{
struct nfnl_net *nfnlnet = nfnl_pernet(net);
netlink_broadcast(nfnlnet->nfnl, skb, portid, group, allocation);
}
EXPORT_SYMBOL_GPL(nfnetlink_broadcast);
/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
const struct nfnl_callback *nc;
const struct nfnetlink_subsystem *ss;
int type, err;
/* All the messages must at least contain nfgenmsg */
if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
return 0;
type = nlh->nlmsg_type;
replay:
rcu_read_lock();
ss = nfnetlink_get_subsys(type);
if (!ss) {
#ifdef CONFIG_MODULES
rcu_read_unlock();
request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
rcu_read_lock();
ss = nfnetlink_get_subsys(type);
if (!ss)
#endif
{
rcu_read_unlock();
return -EINVAL;
}
}
nc = nfnetlink_find_client(type, ss);
if (!nc) {
rcu_read_unlock();
return -EINVAL;
}
{
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
struct nfnl_net *nfnlnet = nfnl_pernet(net);
u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
struct nlattr *attr = (void *)nlh + min_len;
int attrlen = nlh->nlmsg_len - min_len;
__u8 subsys_id = NFNL_SUBSYS_ID(type);
struct nfnl_info info = {
.net = net,
.sk = nfnlnet->nfnl,
.nlh = nlh,
.nfmsg = nlmsg_data(nlh),
.extack = extack,
};
/* Sanity-check NFNL_MAX_ATTR_COUNT */
if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
rcu_read_unlock();
return -ENOMEM;
}
err = nla_parse_deprecated(cda, ss->cb[cb_id].attr_count,
attr, attrlen,
ss->cb[cb_id].policy, extack);
if (err < 0) {
rcu_read_unlock();
return err;
}
if (!nc->call) {
rcu_read_unlock();
return -EINVAL;
}
switch (nc->type) {
case NFNL_CB_RCU:
err = nc->call(skb, &info, (const struct nlattr **)cda);
rcu_read_unlock();
break;
case NFNL_CB_MUTEX:
rcu_read_unlock();
nfnl_lock(subsys_id);
if (nfnl_dereference_protected(subsys_id) != ss ||
nfnetlink_find_client(type, ss) != nc) {
nfnl_unlock(subsys_id);
err = -EAGAIN;
break;
}
err = nc->call(skb, &info, (const struct nlattr **)cda);
nfnl_unlock(subsys_id);
break;
default:
rcu_read_unlock();
err = -EINVAL;
break;
}
if (err == -EAGAIN)
goto replay;
return err;
}
}
struct nfnl_err {
struct list_head head;
struct nlmsghdr *nlh;
int err;
struct netlink_ext_ack extack;
};
static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
const struct netlink_ext_ack *extack)
{
struct nfnl_err *nfnl_err;
nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
if (nfnl_err == NULL)
return -ENOMEM;
nfnl_err->nlh = nlh;
nfnl_err->err = err;
nfnl_err->extack = *extack;
list_add_tail(&nfnl_err->head, list);
return 0;
}
static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
list_del(&nfnl_err->head);
kfree(nfnl_err);
}
static void nfnl_err_reset(struct list_head *err_list)
{
struct nfnl_err *nfnl_err, *next;
list_for_each_entry_safe(nfnl_err, next, err_list, head)
nfnl_err_del(nfnl_err);
}
static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
struct nfnl_err *nfnl_err, *next;
list_for_each_entry_safe(nfnl_err, next, err_list, head) {
netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
&nfnl_err->extack);
nfnl_err_del(nfnl_err);
}
}
enum {
NFNL_BATCH_FAILURE = (1 << 0),
NFNL_BATCH_DONE = (1 << 1),
NFNL_BATCH_REPLAY = (1 << 2),
};
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
u16 subsys_id, u32 genid)
{
struct sk_buff *oskb = skb;
struct net *net = sock_net(skb->sk);
const struct nfnetlink_subsystem *ss;
const struct nfnl_callback *nc;
struct netlink_ext_ack extack;
LIST_HEAD(err_list);
u32 status;
int err;
if (subsys_id >= NFNL_SUBSYS_COUNT)
return netlink_ack(skb, nlh, -EINVAL, NULL);
replay:
status = 0;
replay_abort:
skb = netlink_skb_clone(oskb, GFP_KERNEL);
if (!skb)
return netlink_ack(oskb, nlh, -ENOMEM, NULL);
nfnl_lock(subsys_id);
ss = nfnl_dereference_protected(subsys_id);
if (!ss) {
#ifdef CONFIG_MODULES
nfnl_unlock(subsys_id);
request_module("nfnetlink-subsys-%d", subsys_id);
nfnl_lock(subsys_id);
ss = nfnl_dereference_protected(subsys_id);
if (!ss)
#endif
{
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
return kfree_skb(skb);
}
}
if (!ss->valid_genid || !ss->commit || !ss->abort) {
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
return kfree_skb(skb);
}
if (!try_module_get(ss->owner)) {
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
return kfree_skb(skb);
}
if (!ss->valid_genid(net, genid)) {
module_put(ss->owner);
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -ERESTART, NULL);
return kfree_skb(skb);
}
nfnl_unlock(subsys_id);
while (skb->len >= nlmsg_total_size(0)) {
int msglen, type;
if (fatal_signal_pending(current)) {
nfnl_err_reset(&err_list);
err = -EINTR;
status = NFNL_BATCH_FAILURE;
goto done;
}
memset(&extack, 0, sizeof(extack));
nlh = nlmsg_hdr(skb);
err = 0;
if (nlh->nlmsg_len < NLMSG_HDRLEN ||
skb->len < nlh->nlmsg_len ||
nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
nfnl_err_reset(&err_list);
status |= NFNL_BATCH_FAILURE;
goto done;
}
/* Only requests are handled by the kernel */
if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
err = -EINVAL;
goto ack;
}
type = nlh->nlmsg_type;
if (type == NFNL_MSG_BATCH_BEGIN) {
/* Malformed: Batch begin twice */
nfnl_err_reset(&err_list);
status |= NFNL_BATCH_FAILURE;
goto done;
} else if (type == NFNL_MSG_BATCH_END) {
status |= NFNL_BATCH_DONE;
goto done;
} else if (type < NLMSG_MIN_TYPE) {
err = -EINVAL;
goto ack;
}
/* We only accept a batch with messages for the same
* subsystem.
*/
if (NFNL_SUBSYS_ID(type) != subsys_id) {
err = -EINVAL;
goto ack;
}
nc = nfnetlink_find_client(type, ss);
if (!nc) {
err = -EINVAL;
goto ack;
}
if (nc->type != NFNL_CB_BATCH) {
err = -EINVAL;
goto ack;
}
{
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
struct nfnl_net *nfnlnet = nfnl_pernet(net);
struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
struct nlattr *attr = (void *)nlh + min_len;
u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
int attrlen = nlh->nlmsg_len - min_len;
struct nfnl_info info = {
.net = net,
.sk = nfnlnet->nfnl,
.nlh = nlh,
.nfmsg = nlmsg_data(nlh),
.extack = &extack,
};
/* Sanity-check NFTA_MAX_ATTR */
if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
err = -ENOMEM;
goto ack;
}
err = nla_parse_deprecated(cda,
ss->cb[cb_id].attr_count,
attr, attrlen,
ss->cb[cb_id].policy, NULL);
if (err < 0)
goto ack;
err = nc->call(skb, &info, (const struct nlattr **)cda);
/* The lock was released to autoload some module, we
* have to abort and start from scratch using the
* original skb.
*/
if (err == -EAGAIN) {
status |= NFNL_BATCH_REPLAY;
goto done;
}
}
ack:
if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, so the same error is not reported
			 * several times when the batch is replayed.
			 */
if (err == -ENOMEM ||
nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
/* We failed to enqueue an error, reset the
* list of errors and send OOM to userspace
* pointing to the batch header.
*/
nfnl_err_reset(&err_list);
netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
NULL);
status |= NFNL_BATCH_FAILURE;
goto done;
}
			/* We don't stop processing the batch on errors, so
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
if (err)
status |= NFNL_BATCH_FAILURE;
}
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
}
done:
if (status & NFNL_BATCH_REPLAY) {
ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD);
nfnl_err_reset(&err_list);
kfree_skb(skb);
module_put(ss->owner);
goto replay;
} else if (status == NFNL_BATCH_DONE) {
err = ss->commit(net, oskb);
if (err == -EAGAIN) {
status |= NFNL_BATCH_REPLAY;
goto done;
} else if (err) {
ss->abort(net, oskb, NFNL_ABORT_NONE);
netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
}
} else {
enum nfnl_abort_action abort_action;
if (status & NFNL_BATCH_FAILURE)
abort_action = NFNL_ABORT_NONE;
else
abort_action = NFNL_ABORT_VALIDATE;
err = ss->abort(net, oskb, abort_action);
if (err == -EAGAIN) {
nfnl_err_reset(&err_list);
kfree_skb(skb);
module_put(ss->owner);
status |= NFNL_BATCH_FAILURE;
goto replay_abort;
}
}
nfnl_err_deliver(&err_list, oskb);
kfree_skb(skb);
module_put(ss->owner);
}
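/* Batch wire format handled above (illustrative message sequence):
 *
 *	NFNL_MSG_BATCH_BEGIN	res_id selects the subsystem (nftables)
 *	NFT_MSG_NEWTABLE	\
 *	NFT_MSG_NEWCHAIN	 > replayed on -EAGAIN, acked on request
 *	NFT_MSG_NEWRULE		/
 *	NFNL_MSG_BATCH_END	triggers ss->commit(); anything else,
 *				or an error, ends in ss->abort()
 *
 * Errors are queued on err_list and delivered only after the batch has
 * been fully processed, so a replay does not duplicate acks.
 */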
static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
[NFNL_BATCH_GENID] = { .type = NLA_U32 },
};
static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
struct nlattr *attr = (void *)nlh + min_len;
struct nlattr *cda[NFNL_BATCH_MAX + 1];
int attrlen = nlh->nlmsg_len - min_len;
struct nfgenmsg *nfgenmsg;
int msglen, err;
u32 gen_id = 0;
u16 res_id;
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
return;
err = nla_parse_deprecated(cda, NFNL_BATCH_MAX, attr, attrlen,
nfnl_batch_policy, NULL);
if (err < 0) {
netlink_ack(skb, nlh, err, NULL);
return;
}
if (cda[NFNL_BATCH_GENID])
gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));
nfgenmsg = nlmsg_data(nlh);
skb_pull(skb, msglen);
/* Work around old nft using host byte order */
if (nfgenmsg->res_id == (__force __be16)NFNL_SUBSYS_NFTABLES)
res_id = NFNL_SUBSYS_NFTABLES;
else
res_id = ntohs(nfgenmsg->res_id);
nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}
static void nfnetlink_rcv(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
if (skb->len < NLMSG_HDRLEN ||
nlh->nlmsg_len < NLMSG_HDRLEN ||
skb->len < nlh->nlmsg_len)
return;
if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
netlink_ack(skb, nlh, -EPERM, NULL);
return;
}
if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
nfnetlink_rcv_skb_batch(skb, nlh);
else
netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}
static void nfnetlink_bind_event(struct net *net, unsigned int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
int type, group_bit;
u8 v;
/* All NFNLGRP_CONNTRACK_* group bits fit into u8.
* The other groups are not relevant and can be ignored.
*/
if (group >= 8)
return;
type = nfnl_group2type[group];
switch (type) {
case NFNL_SUBSYS_CTNETLINK:
break;
case NFNL_SUBSYS_CTNETLINK_EXP:
break;
default:
return;
}
group_bit = (1 << group);
spin_lock(&nfnl_grp_active_lock);
v = READ_ONCE(nf_ctnetlink_has_listener);
if ((v & group_bit) == 0) {
v |= group_bit;
/* read concurrently without nfnl_grp_active_lock held. */
WRITE_ONCE(nf_ctnetlink_has_listener, v);
}
spin_unlock(&nfnl_grp_active_lock);
#endif
}
static int nfnetlink_bind(struct net *net, int group)
{
const struct nfnetlink_subsystem *ss;
int type;
if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
return 0;
type = nfnl_group2type[group];
rcu_read_lock();
ss = nfnetlink_get_subsys(type << 8);
rcu_read_unlock();
if (!ss)
request_module_nowait("nfnetlink-subsys-%d", type);
nfnetlink_bind_event(net, group);
return 0;
}
static void nfnetlink_unbind(struct net *net, int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
int type, group_bit;
if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
return;
type = nfnl_group2type[group];
switch (type) {
case NFNL_SUBSYS_CTNETLINK:
break;
case NFNL_SUBSYS_CTNETLINK_EXP:
break;
default:
return;
}
/* ctnetlink_has_listener is u8 */
if (group >= 8)
return;
group_bit = (1 << group);
spin_lock(&nfnl_grp_active_lock);
if (!nfnetlink_has_listeners(net, group)) {
u8 v = READ_ONCE(nf_ctnetlink_has_listener);
v &= ~group_bit;
/* read concurrently without nfnl_grp_active_lock held. */
WRITE_ONCE(nf_ctnetlink_has_listener, v);
}
spin_unlock(&nfnl_grp_active_lock);
#endif
}
static int __net_init nfnetlink_net_init(struct net *net)
{
struct nfnl_net *nfnlnet = nfnl_pernet(net);
struct netlink_kernel_cfg cfg = {
.groups = NFNLGRP_MAX,
.input = nfnetlink_rcv,
.bind = nfnetlink_bind,
.unbind = nfnetlink_unbind,
};
nfnlnet->nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
if (!nfnlnet->nfnl)
return -ENOMEM;
return 0;
}
static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
struct nfnl_net *nfnlnet;
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list) {
nfnlnet = nfnl_pernet(net);
netlink_kernel_release(nfnlnet->nfnl);
}
}
static struct pernet_operations nfnetlink_net_ops = {
.init = nfnetlink_net_init,
.exit_batch = nfnetlink_net_exit_batch,
.id = &nfnetlink_pernet_id,
.size = sizeof(struct nfnl_net),
};
static int __init nfnetlink_init(void)
{
int i;
for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);
	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
__mutex_init(&table[i].mutex, nfnl_lockdep_names[i], &nfnl_lockdep_keys[i]);
return register_pernet_subsys(&nfnetlink_net_ops);
}
static void __exit nfnetlink_exit(void)
{
unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);
| linux-master | net/netfilter/nfnetlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Xtables module for matching the value of the IPv4/IPv6 and TCP ECN bits
*
* (C) 2002 by Harald Welte <[email protected]>
* (C) 2011 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_ecn.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_ecn");
MODULE_ALIAS("ip6t_ecn");
static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_ecn_info *einfo = par->matchinfo;
struct tcphdr _tcph;
const struct tcphdr *th;
/* In practice, TCP match does this, so can't fail. But let's
* be good citizens.
*/
th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return false;
if (einfo->operation & XT_ECN_OP_MATCH_ECE) {
if (einfo->invert & XT_ECN_OP_MATCH_ECE) {
if (th->ece == 1)
return false;
} else {
if (th->ece == 0)
return false;
}
}
if (einfo->operation & XT_ECN_OP_MATCH_CWR) {
if (einfo->invert & XT_ECN_OP_MATCH_CWR) {
if (th->cwr == 1)
return false;
} else {
if (th->cwr == 0)
return false;
}
}
return true;
}
static inline bool match_ip(const struct sk_buff *skb,
const struct xt_ecn_info *einfo)
{
return ((ip_hdr(skb)->tos & XT_ECN_IP_MASK) == einfo->ip_ect) ^
!!(einfo->invert & XT_ECN_OP_MATCH_IP);
}
static bool ecn_mt4(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_ecn_info *info = par->matchinfo;
if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info))
return false;
if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
!match_tcp(skb, par))
return false;
return true;
}
static int ecn_mt_check4(const struct xt_mtchk_param *par)
{
const struct xt_ecn_info *info = par->matchinfo;
const struct ipt_ip *ip = par->entryinfo;
if (info->operation & XT_ECN_OP_MATCH_MASK)
return -EINVAL;
if (info->invert & XT_ECN_OP_MATCH_MASK)
return -EINVAL;
if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
(ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
return -EINVAL;
}
return 0;
}
static inline bool match_ipv6(const struct sk_buff *skb,
const struct xt_ecn_info *einfo)
{
return (((ipv6_hdr(skb)->flow_lbl[0] >> 4) & XT_ECN_IP_MASK) ==
einfo->ip_ect) ^
!!(einfo->invert & XT_ECN_OP_MATCH_IP);
}
static bool ecn_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_ecn_info *info = par->matchinfo;
if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info))
return false;
if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
!match_tcp(skb, par))
return false;
return true;
}
static int ecn_mt_check6(const struct xt_mtchk_param *par)
{
const struct xt_ecn_info *info = par->matchinfo;
const struct ip6t_ip6 *ip = par->entryinfo;
if (info->operation & XT_ECN_OP_MATCH_MASK)
return -EINVAL;
if (info->invert & XT_ECN_OP_MATCH_MASK)
return -EINVAL;
if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
(ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) {
pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
return -EINVAL;
}
return 0;
}
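/* Usage sketch (illustrative): match TCP packets carrying ECE, which per
 * the checkentry constraints above is only valid on "-p tcp" rules:
 *
 *	iptables -A FORWARD -p tcp -m ecn --ecn-tcp-ece -j ACCEPT
 *
 * The IP-level codepoint can be matched on any protocol, e.g.
 * "--ecn-ip-ect 3" for Congestion Experienced (CE).
 */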
static struct xt_match ecn_mt_reg[] __read_mostly = {
{
.name = "ecn",
.family = NFPROTO_IPV4,
.match = ecn_mt4,
.matchsize = sizeof(struct xt_ecn_info),
.checkentry = ecn_mt_check4,
.me = THIS_MODULE,
},
{
.name = "ecn",
.family = NFPROTO_IPV6,
.match = ecn_mt6,
.matchsize = sizeof(struct xt_ecn_info),
.checkentry = ecn_mt_check6,
.me = THIS_MODULE,
},
};
static int __init ecn_mt_init(void)
{
return xt_register_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg));
}
static void __exit ecn_mt_exit(void)
{
xt_unregister_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg));
}
module_init(ecn_mt_init);
module_exit(ecn_mt_exit);
| linux-master | net/netfilter/xt_ecn.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
* Copyright (c) 2012-2014 Pablo Neira Ayuso <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/audit.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_log.h>
#include <linux/netdevice.h>
static const char *nft_log_null_prefix = "";
struct nft_log {
struct nf_loginfo loginfo;
char *prefix;
};
static bool audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
{
struct iphdr _iph;
const struct iphdr *ih;
ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_iph), &_iph);
if (!ih)
return false;
audit_log_format(ab, " saddr=%pI4 daddr=%pI4 proto=%hhu",
&ih->saddr, &ih->daddr, ih->protocol);
return true;
}
static bool audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
{
struct ipv6hdr _ip6h;
const struct ipv6hdr *ih;
u8 nexthdr;
__be16 frag_off;
ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
if (!ih)
return false;
nexthdr = ih->nexthdr;
ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), &nexthdr, &frag_off);
audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
&ih->saddr, &ih->daddr, nexthdr);
return true;
}
static void nft_log_eval_audit(const struct nft_pktinfo *pkt)
{
struct sk_buff *skb = pkt->skb;
struct audit_buffer *ab;
int fam = -1;
if (!audit_enabled)
return;
ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
if (!ab)
return;
audit_log_format(ab, "mark=%#x", skb->mark);
switch (nft_pf(pkt)) {
case NFPROTO_BRIDGE:
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
break;
case htons(ETH_P_IPV6):
fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
break;
}
break;
case NFPROTO_IPV4:
fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
break;
case NFPROTO_IPV6:
fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
break;
}
if (fam == -1)
audit_log_format(ab, " saddr=? daddr=? proto=-1");
audit_log_end(ab);
}
static void nft_log_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_log *priv = nft_expr_priv(expr);
if (priv->loginfo.type == NF_LOG_TYPE_LOG &&
priv->loginfo.u.log.level == NFT_LOGLEVEL_AUDIT) {
nft_log_eval_audit(pkt);
return;
}
nf_log_packet(nft_net(pkt), nft_pf(pkt), nft_hook(pkt), pkt->skb,
nft_in(pkt), nft_out(pkt), &priv->loginfo, "%s",
priv->prefix);
}
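/* Usage sketch (illustrative nft rules): "level audit" takes the audit
 * path above, a group takes the nfnetlink_log (ULOG) path, and anything
 * else goes through nf_log_packet():
 *
 *	nft add rule inet filter input log prefix "in: " level info
 *	nft add rule inet filter input log group 2 snaplen 64
 *	nft add rule inet filter input log level audit
 */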
static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
[NFTA_LOG_GROUP] = { .type = NLA_U16 },
[NFTA_LOG_PREFIX] = { .type = NLA_STRING,
.len = NF_LOG_PREFIXLEN - 1 },
[NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
[NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
[NFTA_LOG_LEVEL] = { .type = NLA_U32 },
[NFTA_LOG_FLAGS] = { .type = NLA_U32 },
};
static int nft_log_modprobe(struct net *net, enum nf_log_type t)
{
switch (t) {
case NF_LOG_TYPE_LOG:
return nft_request_module(net, "%s", "nf_log_syslog");
case NF_LOG_TYPE_ULOG:
return nft_request_module(net, "%s", "nfnetlink_log");
case NF_LOG_TYPE_MAX:
break;
}
return -ENOENT;
}
static int nft_log_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_log *priv = nft_expr_priv(expr);
struct nf_loginfo *li = &priv->loginfo;
const struct nlattr *nla;
int err;
li->type = NF_LOG_TYPE_LOG;
if (tb[NFTA_LOG_LEVEL] != NULL &&
tb[NFTA_LOG_GROUP] != NULL)
return -EINVAL;
if (tb[NFTA_LOG_GROUP] != NULL) {
li->type = NF_LOG_TYPE_ULOG;
if (tb[NFTA_LOG_FLAGS] != NULL)
return -EINVAL;
}
nla = tb[NFTA_LOG_PREFIX];
if (nla != NULL) {
priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
if (priv->prefix == NULL)
return -ENOMEM;
nla_strscpy(priv->prefix, nla, nla_len(nla) + 1);
} else {
priv->prefix = (char *)nft_log_null_prefix;
}
switch (li->type) {
case NF_LOG_TYPE_LOG:
if (tb[NFTA_LOG_LEVEL] != NULL) {
li->u.log.level =
ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
} else {
li->u.log.level = NFT_LOGLEVEL_WARNING;
}
if (li->u.log.level > NFT_LOGLEVEL_AUDIT) {
err = -EINVAL;
goto err1;
}
if (tb[NFTA_LOG_FLAGS] != NULL) {
li->u.log.logflags =
ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));
if (li->u.log.logflags & ~NF_LOG_MASK) {
err = -EINVAL;
goto err1;
}
}
break;
case NF_LOG_TYPE_ULOG:
li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
if (tb[NFTA_LOG_SNAPLEN] != NULL) {
li->u.ulog.flags |= NF_LOG_F_COPY_LEN;
li->u.ulog.copy_len =
ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
}
if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
li->u.ulog.qthreshold =
ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
}
break;
}
if (li->u.log.level == NFT_LOGLEVEL_AUDIT)
return 0;
err = nf_logger_find_get(ctx->family, li->type);
if (err < 0) {
if (nft_log_modprobe(ctx->net, li->type) == -EAGAIN)
err = -EAGAIN;
goto err1;
}
return 0;
err1:
if (priv->prefix != nft_log_null_prefix)
kfree(priv->prefix);
return err;
}
static void nft_log_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_log *priv = nft_expr_priv(expr);
struct nf_loginfo *li = &priv->loginfo;
if (priv->prefix != nft_log_null_prefix)
kfree(priv->prefix);
if (li->u.log.level == NFT_LOGLEVEL_AUDIT)
return;
nf_logger_put(ctx->family, li->type);
}
static int nft_log_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_log *priv = nft_expr_priv(expr);
const struct nf_loginfo *li = &priv->loginfo;
if (priv->prefix != nft_log_null_prefix)
if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
goto nla_put_failure;
switch (li->type) {
case NF_LOG_TYPE_LOG:
if (nla_put_be32(skb, NFTA_LOG_LEVEL, htonl(li->u.log.level)))
goto nla_put_failure;
if (li->u.log.logflags) {
if (nla_put_be32(skb, NFTA_LOG_FLAGS,
htonl(li->u.log.logflags)))
goto nla_put_failure;
}
break;
case NF_LOG_TYPE_ULOG:
if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
goto nla_put_failure;
if (li->u.ulog.flags & NF_LOG_F_COPY_LEN) {
if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
htonl(li->u.ulog.copy_len)))
goto nla_put_failure;
}
if (li->u.ulog.qthreshold) {
if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
htons(li->u.ulog.qthreshold)))
goto nla_put_failure;
}
break;
}
return 0;
nla_put_failure:
return -1;
}
static struct nft_expr_type nft_log_type;
static const struct nft_expr_ops nft_log_ops = {
.type = &nft_log_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_log)),
.eval = nft_log_eval,
.init = nft_log_init,
.destroy = nft_log_destroy,
.dump = nft_log_dump,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_log_type __read_mostly = {
.name = "log",
.ops = &nft_log_ops,
.policy = nft_log_policy,
.maxattr = NFTA_LOG_MAX,
.owner = THIS_MODULE,
};
static int __init nft_log_module_init(void)
{
return nft_register_expr(&nft_log_type);
}
static void __exit nft_log_module_exit(void)
{
nft_unregister_expr(&nft_log_type);
}
module_init(nft_log_module_init);
module_exit(nft_log_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS_NFT_EXPR("log");
MODULE_DESCRIPTION("Netfilter nf_tables log module");
| linux-master | net/netfilter/nft_log.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C)2003,2004 USAGI/WIDE Project
*
* Author:
* Yasuyuki Kozakai @USAGI <[email protected]>
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/seq_file.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>
#include "nf_internals.h"
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net,
struct nf_conntrack_tuple *tuple)
{
const struct icmp6hdr *hp;
struct icmp6hdr _hdr;
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
return false;
tuple->dst.u.icmp.type = hp->icmp6_type;
tuple->src.u.icmp.id = hp->icmp6_identifier;
tuple->dst.u.icmp.code = hp->icmp6_code;
return true;
}
/* Inverse ICMPv6 type map; entries are biased by +1 so that unlisted
 * slots read back as 0 ("no inverse").
 */
static const u_int8_t invmap[] = {
[ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
[ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1,
[ICMPV6_NI_QUERY - 128] = ICMPV6_NI_REPLY + 1,
[ICMPV6_NI_REPLY - 128] = ICMPV6_NI_QUERY + 1
};
static const u_int8_t noct_valid_new[] = {
[ICMPV6_MGM_QUERY - 130] = 1,
[ICMPV6_MGM_REPORT - 130] = 1,
[ICMPV6_MGM_REDUCTION - 130] = 1,
[NDISC_ROUTER_SOLICITATION - 130] = 1,
[NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
[NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
[NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
[ICMPV6_MLD2_REPORT - 130] = 1
};
bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
int type = orig->dst.u.icmp.type - 128;
if (type < 0 || type >= sizeof(invmap) || !invmap[type])
return false;
tuple->src.u.icmp.id = orig->src.u.icmp.id;
tuple->dst.u.icmp.type = invmap[type] - 1;
tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
return true;
}
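/* Worked example (illustrative): an echo request has type 128, so
 * invmap[128 - 128] == ICMPV6_ECHO_REPLY + 1 == 130, and the inverted
 * tuple gets type 130 - 1 == ICMPV6_ECHO_REPLY (129). The +1 bias only
 * exists so that unlisted types read back as 0 and are rejected.
 */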
static unsigned int *icmpv6_get_timeouts(struct net *net)
{
return &nf_icmpv6_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
unsigned int *timeout = nf_ct_timeout_lookup(ct);
static const u8 valid_new[] = {
[ICMPV6_ECHO_REQUEST - 128] = 1,
[ICMPV6_NI_QUERY - 128] = 1
};
if (state->pf != NFPROTO_IPV6)
return -NF_ACCEPT;
if (!nf_ct_is_confirmed(ct)) {
int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128;
if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
/* Can't create a new ICMPv6 `conn' with this. */
pr_debug("icmpv6: can't create new conn with type %u\n",
type + 128);
nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
return -NF_ACCEPT;
}
}
if (!timeout)
timeout = icmpv6_get_timeouts(nf_ct_net(ct));
	/* Do not immediately delete the connection after the first
	 * successful reply, to avoid excessive conntrackd traffic and to
	 * handle ICMP echo reply duplicates correctly.
	 */
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
static void icmpv6_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
nf_l4proto_log_invalid(skb, state, IPPROTO_ICMPV6, "%s", msg);
}
static noinline_for_stack int
nf_conntrack_icmpv6_redirect(struct nf_conn *tmpl, struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state)
{
u8 hl = ipv6_hdr(skb)->hop_limit;
union nf_inet_addr outer_daddr;
union {
struct nd_opt_hdr nd_opt;
struct rd_msg rd_msg;
} tmp;
const struct nd_opt_hdr *nd_opt;
const struct rd_msg *rd_msg;
rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg);
if (!rd_msg) {
icmpv6_error_log(skb, state, "short redirect");
return -NF_ACCEPT;
}
if (rd_msg->icmph.icmp6_code != 0)
return NF_ACCEPT;
if (hl != 255 || !(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
icmpv6_error_log(skb, state, "invalid saddr or hoplimit for redirect");
return -NF_ACCEPT;
}
dataoff += sizeof(*rd_msg);
/* warning: rd_msg no longer usable after this call */
nd_opt = skb_header_pointer(skb, dataoff, sizeof(*nd_opt), &tmp.nd_opt);
if (!nd_opt || nd_opt->nd_opt_len == 0) {
icmpv6_error_log(skb, state, "redirect without options");
return -NF_ACCEPT;
}
/* We could call ndisc_parse_options(), but it would need
* skb_linearize() and a bit more work.
*/
if (nd_opt->nd_opt_type != ND_OPT_REDIRECT_HDR)
return NF_ACCEPT;
memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
sizeof(outer_daddr.ip6));
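	/* Skip the 8-byte Redirected Header option header (type, length,
	 * six reserved octets) to reach the embedded IPv6 packet.
	 */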
dataoff += 8;
return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
IPPROTO_ICMPV6, &outer_daddr);
}
int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state)
{
union nf_inet_addr outer_daddr;
const struct icmp6hdr *icmp6h;
struct icmp6hdr _ih;
int type;
icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
if (icmp6h == NULL) {
icmpv6_error_log(skb, state, "short packet");
return -NF_ACCEPT;
}
if (state->hook == NF_INET_PRE_ROUTING &&
state->net->ct.sysctl_checksum &&
nf_ip6_checksum(skb, state->hook, dataoff, IPPROTO_ICMPV6)) {
icmpv6_error_log(skb, state, "ICMPv6 checksum failed");
return -NF_ACCEPT;
}
type = icmp6h->icmp6_type - 130;
if (type >= 0 && type < sizeof(noct_valid_new) &&
noct_valid_new[type]) {
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
return NF_ACCEPT;
}
if (icmp6h->icmp6_type == NDISC_REDIRECT)
return nf_conntrack_icmpv6_redirect(tmpl, skb, dataoff, state);
	/* Informational message (type >= 128), not an ICMPv6 error? */
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;
memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
sizeof(outer_daddr.ip6));
dataoff += sizeof(*icmp6h);
return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
IPPROTO_ICMPV6, &outer_daddr);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *t)
{
if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static const struct nla_policy icmpv6_nla_policy[CTA_PROTO_MAX+1] = {
[CTA_PROTO_ICMPV6_TYPE] = { .type = NLA_U8 },
[CTA_PROTO_ICMPV6_CODE] = { .type = NLA_U8 },
[CTA_PROTO_ICMPV6_ID] = { .type = NLA_U16 },
};
static int icmpv6_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *tuple,
u_int32_t flags)
{
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) {
if (!tb[CTA_PROTO_ICMPV6_TYPE])
return -EINVAL;
tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]);
if (tuple->dst.u.icmp.type < 128 ||
tuple->dst.u.icmp.type - 128 >= sizeof(invmap) ||
!invmap[tuple->dst.u.icmp.type - 128])
return -EINVAL;
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) {
if (!tb[CTA_PROTO_ICMPV6_CODE])
return -EINVAL;
tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]);
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) {
if (!tb[CTA_PROTO_ICMPV6_ID])
return -EINVAL;
tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]);
}
return 0;
}
static unsigned int icmpv6_nlattr_tuple_size(void)
{
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(icmpv6_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeout = data;
struct nf_icmp_net *in = nf_icmpv6_pernet(net);
if (!timeout)
timeout = icmpv6_get_timeouts(net);
if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
} else {
/* Set default ICMPv6 timeout. */
*timeout = in->timeout;
}
return 0;
}
static int
icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
[CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_icmpv6_init_net(struct net *net)
{
struct nf_icmp_net *in = nf_icmpv6_pernet(net);
in->timeout = nf_ct_icmpv6_timeout;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
{
.l4proto = IPPROTO_ICMPV6,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = icmpv6_tuple_to_nlattr,
.nlattr_tuple_size = icmpv6_nlattr_tuple_size,
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
.obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
		.nlattr_max	= CTA_TIMEOUT_ICMPV6_MAX,
.obj_size = sizeof(unsigned int),
.nla_policy = icmpv6_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
| linux-master | net/netfilter/nf_conntrack_proto_icmpv6.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match one of a list of TCP/UDP(-Lite)/SCTP/DCCP ports:
   the port fields sit at the same offset in all of these headers, so they
   can be treated uniformly. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/netfilter/xt_multiport.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("Xtables: multiple port matching for TCP, UDP, UDP-Lite, SCTP and DCCP");
MODULE_ALIAS("ipt_multiport");
MODULE_ALIAS("ip6t_multiport");
/* Returns true if the port is matched by the test, false otherwise. */
static inline bool
ports_match_v1(const struct xt_multiport_v1 *minfo,
u_int16_t src, u_int16_t dst)
{
unsigned int i;
u_int16_t s, e;
for (i = 0; i < minfo->count; i++) {
s = minfo->ports[i];
if (minfo->pflags[i]) {
/* range port matching */
e = minfo->ports[++i];
pr_debug("src or dst matches with %d-%d?\n", s, e);
switch (minfo->flags) {
case XT_MULTIPORT_SOURCE:
if (src >= s && src <= e)
return true ^ minfo->invert;
break;
case XT_MULTIPORT_DESTINATION:
if (dst >= s && dst <= e)
return true ^ minfo->invert;
break;
case XT_MULTIPORT_EITHER:
if ((dst >= s && dst <= e) ||
(src >= s && src <= e))
return true ^ minfo->invert;
break;
default:
break;
}
} else {
/* exact port matching */
pr_debug("src or dst matches with %d?\n", s);
switch (minfo->flags) {
case XT_MULTIPORT_SOURCE:
if (src == s)
return true ^ minfo->invert;
break;
case XT_MULTIPORT_DESTINATION:
if (dst == s)
return true ^ minfo->invert;
break;
case XT_MULTIPORT_EITHER:
if (src == s || dst == s)
return true ^ minfo->invert;
break;
default:
break;
}
}
}
return minfo->invert;
}
static bool
multiport_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const __be16 *pptr;
__be16 _ports[2];
const struct xt_multiport_v1 *multiinfo = par->matchinfo;
if (par->fragoff != 0)
return false;
pptr = skb_header_pointer(skb, par->thoff, sizeof(_ports), _ports);
if (pptr == NULL) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
pr_debug("Dropping evil offset=0 tinygram.\n");
par->hotdrop = true;
return false;
}
return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1]));
}
static inline bool
check(u_int16_t proto,
u_int8_t ip_invflags,
u_int8_t match_flags,
u_int8_t count)
{
/* Must specify supported protocol, no unknown flags or bad count */
return (proto == IPPROTO_TCP || proto == IPPROTO_UDP
|| proto == IPPROTO_UDPLITE
|| proto == IPPROTO_SCTP || proto == IPPROTO_DCCP)
&& !(ip_invflags & XT_INV_PROTO)
&& (match_flags == XT_MULTIPORT_SOURCE
|| match_flags == XT_MULTIPORT_DESTINATION
|| match_flags == XT_MULTIPORT_EITHER)
&& count <= XT_MULTI_PORTS;
}
static int multiport_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ip *ip = par->entryinfo;
const struct xt_multiport_v1 *multiinfo = par->matchinfo;
return check(ip->proto, ip->invflags, multiinfo->flags,
multiinfo->count) ? 0 : -EINVAL;
}
static int multiport_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_ip6 *ip = par->entryinfo;
const struct xt_multiport_v1 *multiinfo = par->matchinfo;
return check(ip->proto, ip->invflags, multiinfo->flags,
multiinfo->count) ? 0 : -EINVAL;
}
static struct xt_match multiport_mt_reg[] __read_mostly = {
{
.name = "multiport",
.family = NFPROTO_IPV4,
.revision = 1,
.checkentry = multiport_mt_check,
.match = multiport_mt,
.matchsize = sizeof(struct xt_multiport_v1),
.me = THIS_MODULE,
},
{
.name = "multiport",
.family = NFPROTO_IPV6,
.revision = 1,
.checkentry = multiport_mt6_check,
.match = multiport_mt,
.matchsize = sizeof(struct xt_multiport_v1),
.me = THIS_MODULE,
},
};
static int __init multiport_mt_init(void)
{
return xt_register_matches(multiport_mt_reg,
ARRAY_SIZE(multiport_mt_reg));
}
static void __exit multiport_mt_exit(void)
{
xt_unregister_matches(multiport_mt_reg, ARRAY_SIZE(multiport_mt_reg));
}
module_init(multiport_mt_init);
module_exit(multiport_mt_exit);
| linux-master | net/netfilter/xt_multiport.c |
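/*
 * Illustrative userspace sketch (not part of the kernel tree): the
 * "result ^ invert" idiom used by ports_match_v1() above.  A hit
 * returns true ^ invert and a fall-through returns plain invert, so a
 * single flag flips the whole predicate.  All names below are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool port_in_ranges(unsigned short port,
			   const unsigned short (*ranges)[2],
			   unsigned int count, bool invert)
{
	for (unsigned int i = 0; i < count; i++)
		if (port >= ranges[i][0] && port <= ranges[i][1])
			return true ^ invert;	/* hit: true, or false when inverted */
	return invert;				/* miss: false, or true when inverted */
}

int main(void)
{
	static const unsigned short ranges[][2] = { { 80, 80 }, { 8000, 8100 } };

	/* prints "1 0": 8080 is in range, so the inverted match fails */
	printf("%d %d\n", port_in_ranges(8080, ranges, 2, false),
	       port_in_ranges(8080, ranges, 2, true));
	return 0;
}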
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_LOG.h>
#include <net/netfilter/nf_log.h>
static const struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = LOGLEVEL_NOTICE,
.logflags = NF_LOG_DEFAULT_MASK,
},
},
};
struct arppayload {
unsigned char mac_src[ETH_ALEN];
unsigned char ip_src[4];
unsigned char mac_dst[ETH_ALEN];
unsigned char ip_dst[4];
};
/* Guard against containers flooding syslog. */
static bool nf_log_allowed(const struct net *net)
{
return net_eq(net, &init_net) || sysctl_nf_log_all_netns;
}
static void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
{
u16 vid;
if (!skb_vlan_tag_present(skb))
return;
vid = skb_vlan_tag_get(skb);
nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
}
static void noinline_for_stack
dump_arp_packet(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int nhoff)
{
const struct arppayload *ap;
struct arppayload _arpp;
const struct arphdr *ah;
unsigned int logflags;
struct arphdr _arph;
ah = skb_header_pointer(skb, nhoff, sizeof(_arph), &_arph);
if (!ah) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_DEFAULT_MASK;
if (logflags & NF_LOG_MACDECODE) {
nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
nf_log_dump_vlan(m, skb);
nf_log_buf_add(m, "MACPROTO=%04x ",
ntohs(eth_hdr(skb)->h_proto));
}
nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
/* If it's for Ethernet and the lengths are OK, then log the ARP
* payload.
*/
if (ah->ar_hrd != htons(ARPHRD_ETHER) ||
ah->ar_hln != ETH_ALEN ||
ah->ar_pln != sizeof(__be32))
return;
ap = skb_header_pointer(skb, nhoff + sizeof(_arph), sizeof(_arpp), &_arpp);
if (!ap) {
nf_log_buf_add(m, " INCOMPLETE [%zu bytes]",
skb->len - sizeof(_arph));
return;
}
nf_log_buf_add(m, " MACSRC=%pM IPSRC=%pI4 MACDST=%pM IPDST=%pI4",
ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
}
static void
nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo, const char *prefix)
{
const struct net_device *physoutdev __maybe_unused;
const struct net_device *physindev __maybe_unused;
nf_log_buf_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
'0' + loginfo->u.log.level, prefix,
in ? in->name : "",
out ? out->name : "");
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
physindev = nf_bridge_get_physindev(skb);
if (physindev && in != physindev)
nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
physoutdev = nf_bridge_get_physoutdev(skb);
if (physoutdev && out != physoutdev)
nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name);
#endif
}
static void nf_log_arp_packet(struct net *net, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct nf_log_buf *m;
if (!nf_log_allowed(net))
return;
m = nf_log_buf_open();
if (!loginfo)
loginfo = &default_loginfo;
nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
prefix);
dump_arp_packet(m, loginfo, skb, skb_network_offset(skb));
nf_log_buf_close(m);
}
static struct nf_logger nf_arp_logger __read_mostly = {
.name = "nf_log_arp",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_arp_packet,
.me = THIS_MODULE,
};
static void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
struct sock *sk)
{
if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk)))
return;
read_lock_bh(&sk->sk_callback_lock);
if (sk->sk_socket && sk->sk_socket->file) {
const struct cred *cred = sk->sk_socket->file->f_cred;
nf_log_buf_add(m, "UID=%u GID=%u ",
from_kuid_munged(&init_user_ns, cred->fsuid),
from_kgid_munged(&init_user_ns, cred->fsgid));
}
read_unlock_bh(&sk->sk_callback_lock);
}
static noinline_for_stack int
nf_log_dump_tcp_header(struct nf_log_buf *m,
const struct sk_buff *skb,
u8 proto, int fragment,
unsigned int offset,
unsigned int logflags)
{
struct tcphdr _tcph;
const struct tcphdr *th;
/* Max length: 10 "PROTO=TCP " */
nf_log_buf_add(m, "PROTO=TCP ");
if (fragment)
return 0;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (!th) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
return 1;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
nf_log_buf_add(m, "SPT=%u DPT=%u ",
ntohs(th->source), ntohs(th->dest));
/* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
if (logflags & NF_LOG_TCPSEQ) {
nf_log_buf_add(m, "SEQ=%u ACK=%u ",
ntohl(th->seq), ntohl(th->ack_seq));
}
/* Max length: 13 "WINDOW=65535 " */
nf_log_buf_add(m, "WINDOW=%u ", ntohs(th->window));
/* Max length: 9 "RES=0x3C " */
nf_log_buf_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
TCP_RESERVED_BITS) >> 22));
/* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
if (th->cwr)
nf_log_buf_add(m, "CWR ");
if (th->ece)
nf_log_buf_add(m, "ECE ");
if (th->urg)
nf_log_buf_add(m, "URG ");
if (th->ack)
nf_log_buf_add(m, "ACK ");
if (th->psh)
nf_log_buf_add(m, "PSH ");
if (th->rst)
nf_log_buf_add(m, "RST ");
if (th->syn)
nf_log_buf_add(m, "SYN ");
if (th->fin)
nf_log_buf_add(m, "FIN ");
/* Max length: 11 "URGP=65535 " */
nf_log_buf_add(m, "URGP=%u ", ntohs(th->urg_ptr));
if ((logflags & NF_LOG_TCPOPT) && th->doff * 4 > sizeof(struct tcphdr)) {
unsigned int optsize = th->doff * 4 - sizeof(struct tcphdr);
u8 _opt[60 - sizeof(struct tcphdr)];
unsigned int i;
const u8 *op;
op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
optsize, _opt);
if (!op) {
nf_log_buf_add(m, "OPT (TRUNCATED)");
return 1;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
nf_log_buf_add(m, "OPT (");
for (i = 0; i < optsize; i++)
nf_log_buf_add(m, "%02X", op[i]);
nf_log_buf_add(m, ") ");
}
return 0;
}
static noinline_for_stack int
nf_log_dump_udp_header(struct nf_log_buf *m,
const struct sk_buff *skb,
u8 proto, int fragment,
unsigned int offset)
{
struct udphdr _udph;
const struct udphdr *uh;
if (proto == IPPROTO_UDP)
/* Max length: 10 "PROTO=UDP " */
nf_log_buf_add(m, "PROTO=UDP ");
else /* Max length: 14 "PROTO=UDPLITE " */
nf_log_buf_add(m, "PROTO=UDPLITE ");
if (fragment)
goto out;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (!uh) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
return 1;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
nf_log_buf_add(m, "SPT=%u DPT=%u LEN=%u ",
ntohs(uh->source), ntohs(uh->dest), ntohs(uh->len));
out:
return 0;
}
/* One level of recursion won't kill us */
static noinline_for_stack void
dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int iphoff)
{
const struct iphdr *ih;
unsigned int logflags;
struct iphdr _iph;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_DEFAULT_MASK;
ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
if (!ih) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
/* Important fields:
* TOS, len, DF/MF, fragment offset, TTL, src, dst, options.
* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 "
*/
nf_log_buf_add(m, "SRC=%pI4 DST=%pI4 ", &ih->saddr, &ih->daddr);
/* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
nf_log_buf_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
iph_totlen(skb, ih), ih->tos & IPTOS_TOS_MASK,
ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
/* Max length: 6 "CE DF MF " */
if (ntohs(ih->frag_off) & IP_CE)
nf_log_buf_add(m, "CE ");
if (ntohs(ih->frag_off) & IP_DF)
nf_log_buf_add(m, "DF ");
if (ntohs(ih->frag_off) & IP_MF)
nf_log_buf_add(m, "MF ");
/* Max length: 11 "FRAG:65535 " */
if (ntohs(ih->frag_off) & IP_OFFSET)
nf_log_buf_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
if ((logflags & NF_LOG_IPOPT) &&
ih->ihl * 4 > sizeof(struct iphdr)) {
unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
const unsigned char *op;
unsigned int i, optsize;
optsize = ih->ihl * 4 - sizeof(struct iphdr);
op = skb_header_pointer(skb, iphoff + sizeof(_iph),
optsize, _opt);
if (!op) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
nf_log_buf_add(m, "OPT (");
for (i = 0; i < optsize; i++)
nf_log_buf_add(m, "%02X", op[i]);
nf_log_buf_add(m, ") ");
}
switch (ih->protocol) {
case IPPROTO_TCP:
if (nf_log_dump_tcp_header(m, skb, ih->protocol,
ntohs(ih->frag_off) & IP_OFFSET,
iphoff + ih->ihl * 4, logflags))
return;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
if (nf_log_dump_udp_header(m, skb, ih->protocol,
ntohs(ih->frag_off) & IP_OFFSET,
iphoff + ih->ihl * 4))
return;
break;
case IPPROTO_ICMP: {
static const size_t required_len[NR_ICMP_TYPES + 1] = {
[ICMP_ECHOREPLY] = 4,
[ICMP_DEST_UNREACH] = 8 + sizeof(struct iphdr),
[ICMP_SOURCE_QUENCH] = 8 + sizeof(struct iphdr),
[ICMP_REDIRECT] = 8 + sizeof(struct iphdr),
[ICMP_ECHO] = 4,
[ICMP_TIME_EXCEEDED] = 8 + sizeof(struct iphdr),
[ICMP_PARAMETERPROB] = 8 + sizeof(struct iphdr),
[ICMP_TIMESTAMP] = 20,
[ICMP_TIMESTAMPREPLY] = 20,
[ICMP_ADDRESS] = 12,
[ICMP_ADDRESSREPLY] = 12 };
const struct icmphdr *ich;
struct icmphdr _icmph;
/* Max length: 11 "PROTO=ICMP " */
nf_log_buf_add(m, "PROTO=ICMP ");
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
sizeof(_icmph), &_icmph);
if (!ich) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl * 4);
break;
}
/* Max length: 18 "TYPE=255 CODE=255 " */
nf_log_buf_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
if (ich->type <= NR_ICMP_TYPES &&
required_len[ich->type] &&
skb->len - iphoff - ih->ihl * 4 < required_len[ich->type]) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl * 4);
break;
}
switch (ich->type) {
case ICMP_ECHOREPLY:
case ICMP_ECHO:
/* Max length: 19 "ID=65535 SEQ=65535 " */
nf_log_buf_add(m, "ID=%u SEQ=%u ",
ntohs(ich->un.echo.id),
ntohs(ich->un.echo.sequence));
break;
case ICMP_PARAMETERPROB:
/* Max length: 14 "PARAMETER=255 " */
nf_log_buf_add(m, "PARAMETER=%u ",
ntohl(ich->un.gateway) >> 24);
break;
case ICMP_REDIRECT:
/* Max length: 24 "GATEWAY=255.255.255.255 " */
nf_log_buf_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
fallthrough;
case ICMP_DEST_UNREACH:
case ICMP_SOURCE_QUENCH:
case ICMP_TIME_EXCEEDED:
/* Max length: 3+maxlen */
if (!iphoff) { /* Only recurse once. */
nf_log_buf_add(m, "[");
dump_ipv4_packet(net, m, info, skb,
iphoff + ih->ihl * 4 + sizeof(_icmph));
nf_log_buf_add(m, "] ");
}
/* Max length: 10 "MTU=65535 " */
if (ich->type == ICMP_DEST_UNREACH &&
ich->code == ICMP_FRAG_NEEDED) {
nf_log_buf_add(m, "MTU=%u ",
ntohs(ich->un.frag.mtu));
}
}
break;
}
/* Max Length */
case IPPROTO_AH: {
const struct ip_auth_hdr *ah;
struct ip_auth_hdr _ahdr;
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 9 "PROTO=AH " */
nf_log_buf_add(m, "PROTO=AH ");
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ah = skb_header_pointer(skb, iphoff + ih->ihl * 4,
sizeof(_ahdr), &_ahdr);
if (!ah) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl * 4);
break;
}
/* Length: 15 "SPI=0xF1234567 " */
nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi));
break;
}
case IPPROTO_ESP: {
const struct ip_esp_hdr *eh;
struct ip_esp_hdr _esph;
/* Max length: 10 "PROTO=ESP " */
nf_log_buf_add(m, "PROTO=ESP ");
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
eh = skb_header_pointer(skb, iphoff + ih->ihl * 4,
sizeof(_esph), &_esph);
if (!eh) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl * 4);
break;
}
/* Length: 15 "SPI=0xF1234567 " */
nf_log_buf_add(m, "SPI=0x%x ", ntohl(eh->spi));
break;
}
/* Max length: 10 "PROTO 255 " */
default:
nf_log_buf_add(m, "PROTO=%u ", ih->protocol);
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & NF_LOG_UID) && !iphoff)
nf_log_dump_sk_uid_gid(net, m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!iphoff && skb->mark)
nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
/* Proto Max log string length */
/* IP: 40+46+6+11+127 = 230 */
/* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
/* UDP: 10+max(25,20) = 35 */
/* UDPLITE: 14+max(25,20) = 39 */
/* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
/* ESP: 10+max(25)+15 = 50 */
/* AH: 9+max(25)+15 = 49 */
/* unknown: 10 */
/* (ICMP allows recursion one level deep) */
/* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
/* maxlen = 230+ 91 + 230 + 252 = 803 */
}
static noinline_for_stack void
dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int ip6hoff,
int recurse)
{
const struct ipv6hdr *ih;
unsigned int hdrlen = 0;
unsigned int logflags;
struct ipv6hdr _ip6h;
unsigned int ptr;
u8 currenthdr;
int fragment;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_DEFAULT_MASK;
ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
if (!ih) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
/* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
nf_log_buf_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
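	/* The first 32-bit word of the IPv6 header packs version (4 bits),
	 * traffic class (8 bits) and flow label (20 bits); the two masks
	 * below extract the latter two fields.
	 */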
nf_log_buf_add(m, "LEN=%zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
(ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
ih->hop_limit,
(ntohl(*(__be32 *)ih) & 0x000fffff));
fragment = 0;
ptr = ip6hoff + sizeof(struct ipv6hdr);
currenthdr = ih->nexthdr;
while (currenthdr != NEXTHDR_NONE && nf_ip6_ext_hdr(currenthdr)) {
struct ipv6_opt_hdr _hdr;
const struct ipv6_opt_hdr *hp;
hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
if (!hp) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
/* Max length: 48 "OPT (...) " */
if (logflags & NF_LOG_IPOPT)
nf_log_buf_add(m, "OPT ( ");
switch (currenthdr) {
case IPPROTO_FRAGMENT: {
struct frag_hdr _fhdr;
const struct frag_hdr *fh;
nf_log_buf_add(m, "FRAG:");
fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
&_fhdr);
if (!fh) {
nf_log_buf_add(m, "TRUNCATED ");
return;
}
/* Max length: 6 "65535 " */
nf_log_buf_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
/* Max length: 11 "INCOMPLETE " */
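			/* The low-order bit of frag_off is the IPv6
			 * "more fragments" (M) flag.
			 */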
if (fh->frag_off & htons(0x0001))
nf_log_buf_add(m, "INCOMPLETE ");
nf_log_buf_add(m, "ID:%08x ",
ntohl(fh->identification));
if (ntohs(fh->frag_off) & 0xFFF8)
fragment = 1;
hdrlen = 8;
break;
}
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
if (fragment) {
if (logflags & NF_LOG_IPOPT)
nf_log_buf_add(m, ")");
return;
}
hdrlen = ipv6_optlen(hp);
break;
/* Max Length */
case IPPROTO_AH:
if (logflags & NF_LOG_IPOPT) {
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
/* Max length: 3 "AH " */
nf_log_buf_add(m, "AH ");
if (fragment) {
nf_log_buf_add(m, ")");
return;
}
ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
&_ahdr);
if (!ah) {
/* Max length: 26 "INCOMPLETE [65535 bytes] )" */
nf_log_buf_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
				/* Length: 15 "SPI=0xF1234567 " */
nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi));
}
hdrlen = ipv6_authlen(hp);
break;
case IPPROTO_ESP:
if (logflags & NF_LOG_IPOPT) {
struct ip_esp_hdr _esph;
const struct ip_esp_hdr *eh;
/* Max length: 4 "ESP " */
nf_log_buf_add(m, "ESP ");
if (fragment) {
nf_log_buf_add(m, ")");
return;
}
/* Max length: 26 "INCOMPLETE [65535 bytes] )" */
eh = skb_header_pointer(skb, ptr, sizeof(_esph),
&_esph);
if (!eh) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 16 "SPI=0xF1234567 )" */
nf_log_buf_add(m, "SPI=0x%x )",
ntohl(eh->spi));
}
return;
default:
/* Max length: 20 "Unknown Ext Hdr 255" */
nf_log_buf_add(m, "Unknown Ext Hdr %u", currenthdr);
return;
}
if (logflags & NF_LOG_IPOPT)
nf_log_buf_add(m, ") ");
currenthdr = hp->nexthdr;
ptr += hdrlen;
}
switch (currenthdr) {
case IPPROTO_TCP:
if (nf_log_dump_tcp_header(m, skb, currenthdr, fragment,
ptr, logflags))
return;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
if (nf_log_dump_udp_header(m, skb, currenthdr, fragment, ptr))
return;
break;
case IPPROTO_ICMPV6: {
struct icmp6hdr _icmp6h;
const struct icmp6hdr *ic;
/* Max length: 13 "PROTO=ICMPv6 " */
nf_log_buf_add(m, "PROTO=ICMPv6 ");
if (fragment)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
if (!ic) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - ptr);
return;
}
/* Max length: 18 "TYPE=255 CODE=255 " */
nf_log_buf_add(m, "TYPE=%u CODE=%u ",
ic->icmp6_type, ic->icmp6_code);
switch (ic->icmp6_type) {
case ICMPV6_ECHO_REQUEST:
case ICMPV6_ECHO_REPLY:
/* Max length: 19 "ID=65535 SEQ=65535 " */
nf_log_buf_add(m, "ID=%u SEQ=%u ",
ntohs(ic->icmp6_identifier),
ntohs(ic->icmp6_sequence));
break;
case ICMPV6_MGM_QUERY:
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
break;
case ICMPV6_PARAMPROB:
/* Max length: 17 "POINTER=ffffffff " */
nf_log_buf_add(m, "POINTER=%08x ",
ntohl(ic->icmp6_pointer));
fallthrough;
case ICMPV6_DEST_UNREACH:
case ICMPV6_PKT_TOOBIG:
case ICMPV6_TIME_EXCEED:
/* Max length: 3+maxlen */
if (recurse) {
nf_log_buf_add(m, "[");
dump_ipv6_packet(net, m, info, skb,
ptr + sizeof(_icmp6h), 0);
nf_log_buf_add(m, "] ");
}
/* Max length: 10 "MTU=65535 " */
if (ic->icmp6_type == ICMPV6_PKT_TOOBIG) {
nf_log_buf_add(m, "MTU=%u ",
ntohl(ic->icmp6_mtu));
}
}
break;
}
/* Max length: 10 "PROTO=255 " */
default:
nf_log_buf_add(m, "PROTO=%u ", currenthdr);
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & NF_LOG_UID) && recurse)
nf_log_dump_sk_uid_gid(net, m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (recurse && skb->mark)
nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
}
static void dump_mac_header(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
unsigned int logflags = 0;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
if (!(logflags & NF_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
case ARPHRD_ETHER:
nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
nf_log_dump_vlan(m, skb);
nf_log_buf_add(m, "MACPROTO=%04x ",
ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
}
fallback:
nf_log_buf_add(m, "MAC=");
if (dev->hard_header_len &&
skb->mac_header != skb->network_header) {
const unsigned char *p = skb_mac_header(skb);
unsigned int i;
if (dev->type == ARPHRD_SIT) {
p -= ETH_HLEN;
if (p < skb->head)
p = NULL;
}
if (p) {
nf_log_buf_add(m, "%02x", *p++);
for (i = 1; i < dev->hard_header_len; i++)
nf_log_buf_add(m, ":%02x", *p++);
}
if (dev->type == ARPHRD_SIT) {
const struct iphdr *iph =
(struct iphdr *)skb_mac_header(skb);
nf_log_buf_add(m, " TUNNEL=%pI4->%pI4", &iph->saddr,
&iph->daddr);
}
}
nf_log_buf_add(m, " ");
}
static void nf_log_ip_packet(struct net *net, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct nf_log_buf *m;
if (!nf_log_allowed(net))
return;
m = nf_log_buf_open();
if (!loginfo)
loginfo = &default_loginfo;
nf_log_dump_packet_common(m, pf, hooknum, skb, in,
out, loginfo, prefix);
if (in)
dump_mac_header(m, loginfo, skb);
dump_ipv4_packet(net, m, loginfo, skb, skb_network_offset(skb));
nf_log_buf_close(m);
}
static struct nf_logger nf_ip_logger __read_mostly = {
.name = "nf_log_ipv4",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_ip_packet,
.me = THIS_MODULE,
};
static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct nf_log_buf *m;
if (!nf_log_allowed(net))
return;
m = nf_log_buf_open();
if (!loginfo)
loginfo = &default_loginfo;
nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
loginfo, prefix);
if (in)
dump_mac_header(m, loginfo, skb);
dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1);
nf_log_buf_close(m);
}
static struct nf_logger nf_ip6_logger __read_mostly = {
.name = "nf_log_ipv6",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_ip6_packet,
.me = THIS_MODULE,
};
static void nf_log_unknown_packet(struct net *net, u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct nf_log_buf *m;
if (!nf_log_allowed(net))
return;
m = nf_log_buf_open();
if (!loginfo)
loginfo = &default_loginfo;
nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
prefix);
dump_mac_header(m, loginfo, skb);
nf_log_buf_close(m);
}
static void nf_log_netdev_packet(struct net *net, u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
nf_log_ip_packet(net, pf, hooknum, skb, in, out, loginfo, prefix);
break;
case htons(ETH_P_IPV6):
nf_log_ip6_packet(net, pf, hooknum, skb, in, out, loginfo, prefix);
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
nf_log_arp_packet(net, pf, hooknum, skb, in, out, loginfo, prefix);
break;
default:
nf_log_unknown_packet(net, pf, hooknum, skb,
in, out, loginfo, prefix);
break;
}
}
static struct nf_logger nf_netdev_logger __read_mostly = {
.name = "nf_log_netdev",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_netdev_packet,
.me = THIS_MODULE,
};
static struct nf_logger nf_bridge_logger __read_mostly = {
.name = "nf_log_bridge",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_netdev_packet,
.me = THIS_MODULE,
};
static int __net_init nf_log_syslog_net_init(struct net *net)
{
int ret = nf_log_set(net, NFPROTO_IPV4, &nf_ip_logger);
if (ret)
return ret;
ret = nf_log_set(net, NFPROTO_ARP, &nf_arp_logger);
if (ret)
goto err1;
ret = nf_log_set(net, NFPROTO_IPV6, &nf_ip6_logger);
if (ret)
goto err2;
ret = nf_log_set(net, NFPROTO_NETDEV, &nf_netdev_logger);
if (ret)
goto err3;
ret = nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger);
if (ret)
goto err4;
return 0;
err4:
nf_log_unset(net, &nf_netdev_logger);
err3:
nf_log_unset(net, &nf_ip6_logger);
err2:
nf_log_unset(net, &nf_arp_logger);
err1:
nf_log_unset(net, &nf_ip_logger);
return ret;
}
static void __net_exit nf_log_syslog_net_exit(struct net *net)
{
nf_log_unset(net, &nf_ip_logger);
nf_log_unset(net, &nf_arp_logger);
nf_log_unset(net, &nf_ip6_logger);
nf_log_unset(net, &nf_netdev_logger);
nf_log_unset(net, &nf_bridge_logger);
}
static struct pernet_operations nf_log_syslog_net_ops = {
.init = nf_log_syslog_net_init,
.exit = nf_log_syslog_net_exit,
};
static int __init nf_log_syslog_init(void)
{
int ret;
ret = register_pernet_subsys(&nf_log_syslog_net_ops);
if (ret < 0)
return ret;
ret = nf_log_register(NFPROTO_IPV4, &nf_ip_logger);
if (ret < 0)
goto err1;
ret = nf_log_register(NFPROTO_ARP, &nf_arp_logger);
if (ret < 0)
goto err2;
ret = nf_log_register(NFPROTO_IPV6, &nf_ip6_logger);
if (ret < 0)
goto err3;
ret = nf_log_register(NFPROTO_NETDEV, &nf_netdev_logger);
if (ret < 0)
goto err4;
ret = nf_log_register(NFPROTO_BRIDGE, &nf_bridge_logger);
if (ret < 0)
goto err5;
return 0;
err5:
nf_log_unregister(&nf_netdev_logger);
err4:
nf_log_unregister(&nf_ip6_logger);
err3:
nf_log_unregister(&nf_arp_logger);
err2:
nf_log_unregister(&nf_ip_logger);
err1:
pr_err("failed to register logger\n");
unregister_pernet_subsys(&nf_log_syslog_net_ops);
return ret;
}
static void __exit nf_log_syslog_exit(void)
{
unregister_pernet_subsys(&nf_log_syslog_net_ops);
nf_log_unregister(&nf_ip_logger);
nf_log_unregister(&nf_arp_logger);
nf_log_unregister(&nf_ip6_logger);
nf_log_unregister(&nf_netdev_logger);
nf_log_unregister(&nf_bridge_logger);
}
module_init(nf_log_syslog_init);
module_exit(nf_log_syslog_exit);
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("Netfilter syslog packet logging");
MODULE_LICENSE("GPL");
MODULE_ALIAS("nf_log_arp");
MODULE_ALIAS("nf_log_bridge");
MODULE_ALIAS("nf_log_ipv4");
MODULE_ALIAS("nf_log_ipv6");
MODULE_ALIAS("nf_log_netdev");
MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 0);
MODULE_ALIAS_NF_LOGGER(AF_INET, 0);
MODULE_ALIAS_NF_LOGGER(3, 0);
MODULE_ALIAS_NF_LOGGER(5, 0); /* NFPROTO_NETDEV */
MODULE_ALIAS_NF_LOGGER(AF_INET6, 0);
| linux-master | net/netfilter/nf_log_syslog.c |
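/*
 * Illustrative userspace sketch (not part of the kernel tree): how the
 * RES=0x%02x line in nf_log_dump_tcp_header() above extracts the four
 * reserved TCP bits.  The kernel masks the big-endian flag word with
 * TCP_RESERVED_BITS and converts with ntohl(); this sketch works on the
 * already host-ordered word, where the mask is 0x0F000000 and shifting
 * right by 22 leaves the bits at positions 2..5, hence the 0x3C maximum.
 * The flag word value is made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flag_word = 0x5F02FFFFu;	/* doff=5, reserved=0xF, SYN, window=0xFFFF */
	unsigned int res = (flag_word & 0x0F000000u) >> 22;

	printf("RES=0x%02X\n", res);		/* prints RES=0x3C */
	return 0;
}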
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_tables.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_arp.h>
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>
#ifdef CONFIG_NF_TABLES_IPV4
static unsigned int nft_do_chain_ipv4(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
nft_set_pktinfo_ipv4(&pkt);
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_ipv4 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_IPV4,
.hook_mask = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
.hooks = {
[NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
[NF_INET_LOCAL_OUT] = nft_do_chain_ipv4,
[NF_INET_FORWARD] = nft_do_chain_ipv4,
[NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
[NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
},
};
static void nft_chain_filter_ipv4_init(void)
{
nft_register_chain_type(&nft_chain_filter_ipv4);
}
static void nft_chain_filter_ipv4_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_ipv4);
}
#else
static inline void nft_chain_filter_ipv4_init(void) {}
static inline void nft_chain_filter_ipv4_fini(void) {}
#endif /* CONFIG_NF_TABLES_IPV4 */
#ifdef CONFIG_NF_TABLES_ARP
static unsigned int nft_do_chain_arp(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
nft_set_pktinfo_unspec(&pkt);
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_arp = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_ARP,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_ARP_IN) |
(1 << NF_ARP_OUT),
.hooks = {
[NF_ARP_IN] = nft_do_chain_arp,
[NF_ARP_OUT] = nft_do_chain_arp,
},
};
static void nft_chain_filter_arp_init(void)
{
nft_register_chain_type(&nft_chain_filter_arp);
}
static void nft_chain_filter_arp_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_arp);
}
#else
static inline void nft_chain_filter_arp_init(void) {}
static inline void nft_chain_filter_arp_fini(void) {}
#endif /* CONFIG_NF_TABLES_ARP */
#ifdef CONFIG_NF_TABLES_IPV6
static unsigned int nft_do_chain_ipv6(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
nft_set_pktinfo_ipv6(&pkt);
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_ipv6 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_IPV6,
.hook_mask = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
.hooks = {
[NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
[NF_INET_LOCAL_OUT] = nft_do_chain_ipv6,
[NF_INET_FORWARD] = nft_do_chain_ipv6,
[NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
[NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
},
};
static void nft_chain_filter_ipv6_init(void)
{
nft_register_chain_type(&nft_chain_filter_ipv6);
}
static void nft_chain_filter_ipv6_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_ipv6);
}
#else
static inline void nft_chain_filter_ipv6_init(void) {}
static inline void nft_chain_filter_ipv6_fini(void) {}
#endif /* CONFIG_NF_TABLES_IPV6 */
#ifdef CONFIG_NF_TABLES_INET
static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
switch (state->pf) {
case NFPROTO_IPV4:
nft_set_pktinfo_ipv4(&pkt);
break;
case NFPROTO_IPV6:
nft_set_pktinfo_ipv6(&pkt);
break;
default:
break;
}
return nft_do_chain(&pkt, priv);
}
static unsigned int nft_do_chain_inet_ingress(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_hook_state ingress_state = *state;
struct nft_pktinfo pkt;
switch (skb->protocol) {
case htons(ETH_P_IP):
/* Original hook is NFPROTO_NETDEV and NF_NETDEV_INGRESS. */
ingress_state.pf = NFPROTO_IPV4;
ingress_state.hook = NF_INET_INGRESS;
nft_set_pktinfo(&pkt, skb, &ingress_state);
if (nft_set_pktinfo_ipv4_ingress(&pkt) < 0)
return NF_DROP;
break;
case htons(ETH_P_IPV6):
ingress_state.pf = NFPROTO_IPV6;
ingress_state.hook = NF_INET_INGRESS;
nft_set_pktinfo(&pkt, skb, &ingress_state);
if (nft_set_pktinfo_ipv6_ingress(&pkt) < 0)
return NF_DROP;
break;
default:
return NF_ACCEPT;
}
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_inet = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_INET,
.hook_mask = (1 << NF_INET_INGRESS) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
.hooks = {
[NF_INET_INGRESS] = nft_do_chain_inet_ingress,
[NF_INET_LOCAL_IN] = nft_do_chain_inet,
[NF_INET_LOCAL_OUT] = nft_do_chain_inet,
[NF_INET_FORWARD] = nft_do_chain_inet,
[NF_INET_PRE_ROUTING] = nft_do_chain_inet,
[NF_INET_POST_ROUTING] = nft_do_chain_inet,
},
};
static void nft_chain_filter_inet_init(void)
{
nft_register_chain_type(&nft_chain_filter_inet);
}
static void nft_chain_filter_inet_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_inet);
}
#else
static inline void nft_chain_filter_inet_init(void) {}
static inline void nft_chain_filter_inet_fini(void) {}
#endif /* CONFIG_NF_TABLES_INET */
#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
static unsigned int
nft_do_chain_bridge(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
nft_set_pktinfo_ipv4_validate(&pkt);
break;
case htons(ETH_P_IPV6):
nft_set_pktinfo_ipv6_validate(&pkt);
break;
default:
nft_set_pktinfo_unspec(&pkt);
break;
}
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_bridge = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_BRIDGE,
.hook_mask = (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_IN) |
(1 << NF_BR_FORWARD) |
(1 << NF_BR_LOCAL_OUT) |
(1 << NF_BR_POST_ROUTING),
.hooks = {
[NF_BR_PRE_ROUTING] = nft_do_chain_bridge,
[NF_BR_LOCAL_IN] = nft_do_chain_bridge,
[NF_BR_FORWARD] = nft_do_chain_bridge,
[NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
[NF_BR_POST_ROUTING] = nft_do_chain_bridge,
},
};
static void nft_chain_filter_bridge_init(void)
{
nft_register_chain_type(&nft_chain_filter_bridge);
}
static void nft_chain_filter_bridge_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_bridge);
}
#else
static inline void nft_chain_filter_bridge_init(void) {}
static inline void nft_chain_filter_bridge_fini(void) {}
#endif /* CONFIG_NF_TABLES_BRIDGE */
#ifdef CONFIG_NF_TABLES_NETDEV
static unsigned int nft_do_chain_netdev(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
switch (skb->protocol) {
case htons(ETH_P_IP):
nft_set_pktinfo_ipv4_validate(&pkt);
break;
case htons(ETH_P_IPV6):
nft_set_pktinfo_ipv6_validate(&pkt);
break;
default:
nft_set_pktinfo_unspec(&pkt);
break;
}
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_filter_netdev = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_NETDEV,
.hook_mask = (1 << NF_NETDEV_INGRESS) |
(1 << NF_NETDEV_EGRESS),
.hooks = {
[NF_NETDEV_INGRESS] = nft_do_chain_netdev,
[NF_NETDEV_EGRESS] = nft_do_chain_netdev,
},
};
static void nft_netdev_event(unsigned long event, struct net_device *dev,
struct nft_ctx *ctx)
{
struct nft_base_chain *basechain = nft_base_chain(ctx->chain);
struct nft_hook *hook, *found = NULL;
int n = 0;
if (event != NETDEV_UNREGISTER)
return;
list_for_each_entry(hook, &basechain->hook_list, list) {
if (hook->ops.dev == dev)
found = hook;
n++;
}
if (!found)
return;
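	/* With several devices bound to this basechain, drop only the hook
	 * for the vanished device; if it was the last one, release the
	 * whole basechain below.
	 */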
if (n > 1) {
nf_unregister_net_hook(ctx->net, &found->ops);
list_del_rcu(&found->list);
kfree_rcu(found, rcu);
return;
}
	/* UNREGISTER events also happen on netns exit.
	 *
	 * Although nf_tables core releases all tables/chains, only this event
	 * handler guarantees that hook->ops.dev is still accessible, so we
	 * cannot skip exiting net namespaces.
	 */
__nft_release_basechain(ctx);
}
static int nf_tables_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nftables_pernet *nft_net;
struct nft_table *table;
struct nft_chain *chain, *nr;
struct nft_ctx ctx = {
.net = dev_net(dev),
};
if (event != NETDEV_UNREGISTER &&
event != NETDEV_CHANGENAME)
return NOTIFY_DONE;
nft_net = nft_pernet(ctx.net);
mutex_lock(&nft_net->commit_mutex);
list_for_each_entry(table, &nft_net->tables, list) {
if (table->family != NFPROTO_NETDEV)
continue;
ctx.family = table->family;
ctx.table = table;
list_for_each_entry_safe(chain, nr, &table->chains, list) {
if (!nft_is_base_chain(chain))
continue;
ctx.chain = chain;
nft_netdev_event(event, dev, &ctx);
}
}
mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
static struct notifier_block nf_tables_netdev_notifier = {
.notifier_call = nf_tables_netdev_event,
};
static int nft_chain_filter_netdev_init(void)
{
int err;
nft_register_chain_type(&nft_chain_filter_netdev);
err = register_netdevice_notifier(&nf_tables_netdev_notifier);
if (err)
goto err_register_netdevice_notifier;
return 0;
err_register_netdevice_notifier:
nft_unregister_chain_type(&nft_chain_filter_netdev);
return err;
}
static void nft_chain_filter_netdev_fini(void)
{
nft_unregister_chain_type(&nft_chain_filter_netdev);
unregister_netdevice_notifier(&nf_tables_netdev_notifier);
}
#else
static inline int nft_chain_filter_netdev_init(void) { return 0; }
static inline void nft_chain_filter_netdev_fini(void) {}
#endif /* CONFIG_NF_TABLES_NETDEV */
int __init nft_chain_filter_init(void)
{
int err;
err = nft_chain_filter_netdev_init();
if (err < 0)
return err;
nft_chain_filter_ipv4_init();
nft_chain_filter_ipv6_init();
nft_chain_filter_arp_init();
nft_chain_filter_inet_init();
nft_chain_filter_bridge_init();
return 0;
}
void nft_chain_filter_fini(void)
{
nft_chain_filter_bridge_fini();
nft_chain_filter_inet_fini();
nft_chain_filter_arp_fini();
nft_chain_filter_ipv6_fini();
nft_chain_filter_ipv4_fini();
nft_chain_filter_netdev_fini();
}
| linux-master | net/netfilter/nft_chain_filter.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* broadcast connection tracking helper
*
* (c) 2005 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/ip.h>
#include <net/route.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
int nf_conntrack_broadcast_help(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int timeout)
{
const struct nf_conntrack_helper *helper;
struct nf_conntrack_expect *exp;
struct iphdr *iph = ip_hdr(skb);
struct rtable *rt = skb_rtable(skb);
struct in_device *in_dev;
struct nf_conn_help *help = nfct_help(ct);
__be32 mask = 0;
/* we're only interested in locally generated packets */
if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk)))
goto out;
if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
goto out;
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
goto out;
in_dev = __in_dev_get_rcu(rt->dst.dev);
if (in_dev != NULL) {
const struct in_ifaddr *ifa;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
if (ifa->ifa_flags & IFA_F_SECONDARY)
continue;
if (ifa->ifa_broadcast == iph->daddr) {
mask = ifa->ifa_mask;
break;
}
}
}
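	/* mask now holds the netmask of the primary address whose directed
	 * broadcast matches the packet's destination; zero means this was
	 * not a subnet broadcast we can expect replies for.
	 */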
if (mask == 0)
goto out;
exp = nf_ct_expect_alloc(ct);
if (exp == NULL)
goto out;
exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
helper = rcu_dereference(help->helper);
if (helper)
exp->tuple.src.u.udp.port = helper->tuple.src.u.udp.port;
exp->mask.src.u3.ip = mask;
exp->mask.src.u.udp.port = htons(0xFFFF);
exp->expectfn = NULL;
exp->flags = NF_CT_EXPECT_PERMANENT;
exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
exp->helper = NULL;
nf_ct_expect_related(exp, 0);
nf_ct_expect_put(exp);
nf_ct_refresh(ct, skb, timeout * HZ);
out:
return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
MODULE_LICENSE("GPL");
| linux-master | net/netfilter/nf_conntrack_broadcast.c |
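/*
 * Illustrative userspace sketch (not part of the kernel tree): the
 * effect of exp->mask.src.u3.ip in nf_conntrack_broadcast_help() above.
 * Masking the source address means the expectation matches a reply from
 * any host on the broadcast subnet.  Addresses below are made up.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool same_subnet(uint32_t a, uint32_t b, uint32_t mask)
{
	return (a & mask) == (b & mask);	/* compare only the masked bits */
}

int main(void)
{
	uint32_t expected = inet_addr("192.168.1.1");	/* tuple source */
	uint32_t reply = inet_addr("192.168.1.77");	/* actual replier */
	uint32_t mask = inet_addr("255.255.255.0");	/* ifa_mask */

	printf("%s\n", same_subnet(expected, reply, mask) ? "match" : "no match");
	return 0;
}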
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
void nft_immediate_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
nft_data_copy(®s->data[priv->dreg], &priv->data, priv->dlen);
}
static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
[NFTA_IMMEDIATE_DREG] = { .type = NLA_U32 },
[NFTA_IMMEDIATE_DATA] = { .type = NLA_NESTED },
};
static enum nft_data_types nft_reg_to_type(const struct nlattr *nla)
{
enum nft_data_types type;
u8 reg;
reg = ntohl(nla_get_be32(nla));
if (reg == NFT_REG_VERDICT)
type = NFT_DATA_VERDICT;
else
type = NFT_DATA_VALUE;
return type;
}
static int nft_immediate_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_immediate_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc = {
.size = sizeof(priv->data),
};
int err;
if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
tb[NFTA_IMMEDIATE_DATA] == NULL)
return -EINVAL;
desc.type = nft_reg_to_type(tb[NFTA_IMMEDIATE_DREG]);
err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
if (err < 0)
return err;
priv->dlen = desc.len;
err = nft_parse_register_store(ctx, tb[NFTA_IMMEDIATE_DREG],
&priv->dreg, &priv->data, desc.type,
desc.len);
if (err < 0)
goto err1;
if (priv->dreg == NFT_REG_VERDICT) {
struct nft_chain *chain = priv->data.verdict.chain;
switch (priv->data.verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
err = nf_tables_bind_chain(ctx, chain);
if (err < 0)
return err;
break;
default:
break;
}
}
return 0;
err1:
nft_data_release(&priv->data, desc.type);
return err;
}
static void nft_immediate_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
const struct nft_data *data = &priv->data;
struct nft_ctx chain_ctx;
struct nft_chain *chain;
struct nft_rule *rule;
if (priv->dreg == NFT_REG_VERDICT) {
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
chain = data->verdict.chain;
if (!nft_chain_binding(chain))
break;
chain_ctx = *ctx;
chain_ctx.chain = chain;
list_for_each_entry(rule, &chain->rules, list)
nft_rule_expr_activate(&chain_ctx, rule);
nft_clear(ctx->net, chain);
break;
default:
break;
}
}
return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
}
static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx,
struct nft_chain *chain,
enum nft_trans_phase phase)
{
struct nft_ctx chain_ctx;
struct nft_rule *rule;
chain_ctx = *ctx;
chain_ctx.chain = chain;
list_for_each_entry(rule, &chain->rules, list)
nft_rule_expr_deactivate(&chain_ctx, rule, phase);
}
static void nft_immediate_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
const struct nft_data *data = &priv->data;
struct nft_chain *chain;
if (priv->dreg == NFT_REG_VERDICT) {
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
chain = data->verdict.chain;
if (!nft_chain_binding(chain))
break;
switch (phase) {
case NFT_TRANS_PREPARE_ERROR:
nf_tables_unbind_chain(ctx, chain);
nft_deactivate_next(ctx->net, chain);
break;
case NFT_TRANS_PREPARE:
nft_immediate_chain_deactivate(ctx, chain, phase);
nft_deactivate_next(ctx->net, chain);
break;
default:
nft_immediate_chain_deactivate(ctx, chain, phase);
nft_chain_del(chain);
chain->bound = false;
nft_use_dec(&chain->table->use);
break;
}
break;
default:
break;
}
}
if (phase == NFT_TRANS_COMMIT)
return;
return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
}
static void nft_immediate_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
const struct nft_data *data = &priv->data;
struct nft_rule *rule, *n;
struct nft_ctx chain_ctx;
struct nft_chain *chain;
if (priv->dreg != NFT_REG_VERDICT)
return;
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
chain = data->verdict.chain;
if (!nft_chain_binding(chain))
break;
/* Rule construction failed, but chain is already bound:
* let the transaction records release this chain and its rules.
*/
if (chain->bound) {
nft_use_dec(&chain->use);
break;
}
/* Rule has been deleted, release chain and its rules. */
chain_ctx = *ctx;
chain_ctx.chain = chain;
nft_use_dec(&chain->use);
list_for_each_entry_safe(rule, n, &chain->rules, list) {
nft_use_dec(&chain->use);
list_del(&rule->list);
nf_tables_rule_destroy(&chain_ctx, rule);
}
nf_tables_chain_destroy(&chain_ctx);
break;
default:
break;
}
}
static int nft_immediate_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_IMMEDIATE_DREG, priv->dreg))
goto nla_put_failure;
return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data,
nft_dreg_to_type(priv->dreg), priv->dlen);
nla_put_failure:
return -1;
}
static int nft_immediate_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **d)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
struct nft_ctx *pctx = (struct nft_ctx *)ctx;
const struct nft_data *data;
int err;
if (priv->dreg != NFT_REG_VERDICT)
return 0;
data = &priv->data;
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
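		/* Track jump depth while validating the target chain so that
		 * overly deep rule-set recursion can be rejected.
		 */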
pctx->level++;
err = nft_chain_validate(ctx, data->verdict.chain);
if (err < 0)
return err;
pctx->level--;
break;
default:
break;
}
return 0;
}
static int nft_immediate_offload_verdict(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_immediate_expr *priv)
{
struct flow_action_entry *entry;
const struct nft_data *data;
entry = &flow->rule->action.entries[ctx->num_actions++];
data = &priv->data;
switch (data->verdict.code) {
case NF_ACCEPT:
entry->id = FLOW_ACTION_ACCEPT;
break;
case NF_DROP:
entry->id = FLOW_ACTION_DROP;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int nft_immediate_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
if (priv->dreg == NFT_REG_VERDICT)
return nft_immediate_offload_verdict(ctx, flow, priv);
memcpy(&ctx->regs[priv->dreg].data, &priv->data, sizeof(priv->data));
return 0;
}
static bool nft_immediate_offload_action(const struct nft_expr *expr)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
if (priv->dreg == NFT_REG_VERDICT)
return true;
return false;
}
static bool nft_immediate_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
if (priv->dreg != NFT_REG_VERDICT)
nft_reg_track_cancel(track, priv->dreg, priv->dlen);
return false;
}
static const struct nft_expr_ops nft_imm_ops = {
.type = &nft_imm_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
.eval = nft_immediate_eval,
.init = nft_immediate_init,
.activate = nft_immediate_activate,
.deactivate = nft_immediate_deactivate,
.destroy = nft_immediate_destroy,
.dump = nft_immediate_dump,
.validate = nft_immediate_validate,
.reduce = nft_immediate_reduce,
.offload = nft_immediate_offload,
.offload_action = nft_immediate_offload_action,
};
struct nft_expr_type nft_imm_type __read_mostly = {
.name = "immediate",
.ops = &nft_imm_ops,
.policy = nft_immediate_policy,
.maxattr = NFTA_IMMEDIATE_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_immediate.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IP tables module for matching the value of the IPv4/IPv6 DSCP field
*
* (C) 2002 by Harald Welte <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/dsfield.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_dscp.h>
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Xtables: DSCP/TOS field match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_dscp");
MODULE_ALIAS("ip6t_dscp");
MODULE_ALIAS("ipt_tos");
MODULE_ALIAS("ip6t_tos");
static bool
dscp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_dscp_info *info = par->matchinfo;
u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
return (dscp == info->dscp) ^ !!info->invert;
}
static bool
dscp_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_dscp_info *info = par->matchinfo;
u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
return (dscp == info->dscp) ^ !!info->invert;
}
static int dscp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_dscp_info *info = par->matchinfo;
if (info->dscp > XT_DSCP_MAX)
return -EDOM;
return 0;
}
static bool tos_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_tos_match_info *info = par->matchinfo;
if (xt_family(par) == NFPROTO_IPV4)
return ((ip_hdr(skb)->tos & info->tos_mask) ==
info->tos_value) ^ !!info->invert;
else
return ((ipv6_get_dsfield(ipv6_hdr(skb)) & info->tos_mask) ==
info->tos_value) ^ !!info->invert;
}
static struct xt_match dscp_mt_reg[] __read_mostly = {
{
.name = "dscp",
.family = NFPROTO_IPV4,
.checkentry = dscp_mt_check,
.match = dscp_mt,
.matchsize = sizeof(struct xt_dscp_info),
.me = THIS_MODULE,
},
{
.name = "dscp",
.family = NFPROTO_IPV6,
.checkentry = dscp_mt_check,
.match = dscp_mt6,
.matchsize = sizeof(struct xt_dscp_info),
.me = THIS_MODULE,
},
{
.name = "tos",
.revision = 1,
.family = NFPROTO_IPV4,
.match = tos_mt,
.matchsize = sizeof(struct xt_tos_match_info),
.me = THIS_MODULE,
},
{
.name = "tos",
.revision = 1,
.family = NFPROTO_IPV6,
.match = tos_mt,
.matchsize = sizeof(struct xt_tos_match_info),
.me = THIS_MODULE,
},
};
static int __init dscp_mt_init(void)
{
return xt_register_matches(dscp_mt_reg, ARRAY_SIZE(dscp_mt_reg));
}
static void __exit dscp_mt_exit(void)
{
xt_unregister_matches(dscp_mt_reg, ARRAY_SIZE(dscp_mt_reg));
}
module_init(dscp_mt_init);
module_exit(dscp_mt_exit);
| linux-master | net/netfilter/xt_DSCP.c |
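/*
 * Illustrative userspace sketch (not part of the kernel tree): the
 * DSCP/ECN split behind the ">> XT_DSCP_SHIFT" in dscp_mt() above.  The
 * TOS/traffic-class octet carries DSCP in its top six bits and ECN in
 * the bottom two.  The sample value is made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tos = 0xB8;		/* EF (DSCP 46) with ECN bits clear */
	uint8_t dscp = tos >> 2;	/* XT_DSCP_SHIFT is 2 */
	uint8_t ecn = tos & 0x03;

	printf("DSCP=%u ECN=%u\n", (unsigned)dscp, (unsigned)ecn);	/* DSCP=46 ECN=0 */
	return 0;
}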
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
struct nft_last {
unsigned long jiffies;
unsigned int set;
};
struct nft_last_priv {
struct nft_last *last;
};
static const struct nla_policy nft_last_policy[NFTA_LAST_MAX + 1] = {
[NFTA_LAST_SET] = { .type = NLA_U32 },
[NFTA_LAST_MSECS] = { .type = NLA_U64 },
};
static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_last_priv *priv = nft_expr_priv(expr);
struct nft_last *last;
u64 last_jiffies;
int err;
last = kzalloc(sizeof(*last), GFP_KERNEL_ACCOUNT);
if (!last)
return -ENOMEM;
if (tb[NFTA_LAST_SET])
last->set = ntohl(nla_get_be32(tb[NFTA_LAST_SET]));
if (last->set && tb[NFTA_LAST_MSECS]) {
err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
if (err < 0)
goto err;
last->jiffies = jiffies - (unsigned long)last_jiffies;
}
priv->last = last;
return 0;
err:
kfree(last);
return err;
}
static void nft_last_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
struct nft_last_priv *priv = nft_expr_priv(expr);
struct nft_last *last = priv->last;
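	/* Packets on different CPUs can hit this expression concurrently;
	 * READ_ONCE/WRITE_ONCE keep the unlocked accesses tear-free and
	 * skip redundant stores.
	 */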
if (READ_ONCE(last->jiffies) != jiffies)
WRITE_ONCE(last->jiffies, jiffies);
if (READ_ONCE(last->set) == 0)
WRITE_ONCE(last->set, 1);
}
static int nft_last_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_last_priv *priv = nft_expr_priv(expr);
struct nft_last *last = priv->last;
unsigned long last_jiffies = READ_ONCE(last->jiffies);
u32 last_set = READ_ONCE(last->set);
__be64 msecs;
if (time_before(jiffies, last_jiffies)) {
WRITE_ONCE(last->set, 0);
last_set = 0;
}
if (last_set)
msecs = nf_jiffies64_to_msecs(jiffies - last_jiffies);
else
msecs = 0;
if (nla_put_be32(skb, NFTA_LAST_SET, htonl(last_set)) ||
nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static void nft_last_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_last_priv *priv = nft_expr_priv(expr);
kfree(priv->last);
}
static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
{
struct nft_last_priv *priv_dst = nft_expr_priv(dst);
struct nft_last_priv *priv_src = nft_expr_priv(src);
priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
if (!priv_dst->last)
return -ENOMEM;
priv_dst->last->set = priv_src->last->set;
priv_dst->last->jiffies = priv_src->last->jiffies;
return 0;
}
static const struct nft_expr_ops nft_last_ops = {
.type = &nft_last_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_last_priv)),
.eval = nft_last_eval,
.init = nft_last_init,
.destroy = nft_last_destroy,
.clone = nft_last_clone,
.dump = nft_last_dump,
.reduce = NFT_REDUCE_READONLY,
};
struct nft_expr_type nft_last_type __read_mostly = {
.name = "last",
.ops = &nft_last_ops,
.policy = nft_last_policy,
.maxattr = NFTA_LAST_MAX,
.flags = NFT_EXPR_STATEFUL,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_last.c |
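/*
 * Illustrative userspace sketch (not part of the kernel tree): why
 * nft_last_dump() above compares stamps with time_before() instead of
 * "<".  Jiffies wrap, so ordering is decided by signed subtraction,
 * which stays correct while the stamps are less than half the counter
 * range apart.  The macro mirrors the kernel helper.
 */
#include <stdio.h>

#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long near_wrap = ~0UL - 10;	/* just before the counter wraps */
	unsigned long wrapped = 5;		/* shortly after the wrap */

	printf("%d\n", time_before(near_wrap, wrapped));	/* 1: correctly "earlier" */
	printf("%d\n", near_wrap < wrapped);			/* 0: naive compare lies */
	return 0;
}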
// SPDX-License-Identifier: GPL-2.0-only
/* Expectation handling for nf_conntrack. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
* (c) 2005-2012 Patrick McHardy <[email protected]>
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/siphash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);
unsigned int nf_ct_expect_max __read_mostly;
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static siphash_aligned_key_t nf_ct_expect_hashrnd;
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
u32 portid, int report)
{
struct nf_conn_help *master_help = nfct_help(exp->master);
struct net *net = nf_ct_exp_net(exp);
struct nf_conntrack_net *cnet;
WARN_ON(!master_help);
WARN_ON(timer_pending(&exp->timeout));
hlist_del_rcu(&exp->hnode);
cnet = nf_ct_pernet(net);
cnet->expect_count--;
hlist_del_rcu(&exp->lnode);
master_help->expecting[exp->class]--;
nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
nf_ct_expect_put(exp);
NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
static void nf_ct_expectation_timed_out(struct timer_list *t)
{
struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);
spin_lock_bh(&nf_conntrack_expect_lock);
nf_ct_unlink_expect(exp);
spin_unlock_bh(&nf_conntrack_expect_lock);
nf_ct_expect_put(exp);
}
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
struct {
union nf_inet_addr dst_addr;
u32 net_mix;
u16 dport;
u8 l3num;
u8 protonum;
} __aligned(SIPHASH_ALIGNMENT) combined;
u32 hash;
get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));
memset(&combined, 0, sizeof(combined));
combined.dst_addr = tuple->dst.u3;
combined.net_mix = net_hash_mix(n);
combined.dport = (__force __u16)tuple->dst.u.all;
combined.l3num = tuple->src.l3num;
combined.protonum = tuple->dst.protonum;
hash = siphash(&combined, sizeof(combined), &nf_ct_expect_hashrnd);
return reciprocal_scale(hash, nf_ct_expect_hsize);
}
static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_expect *i,
const struct nf_conntrack_zone *zone,
const struct net *net)
{
return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
net_eq(net, nf_ct_net(i->master)) &&
nf_ct_zone_equal_any(i->master, zone);
}
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
if (del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct nf_conntrack_expect *i;
unsigned int h;
if (!cnet->expect_count)
return NULL;
h = nf_ct_expect_dst_hash(net, tuple);
hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
if (nf_ct_exp_equal(tuple, i, zone, net))
return i;
}
return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
rcu_read_lock();
i = __nf_ct_expect_find(net, zone, tuple);
if (i && !refcount_inc_not_zero(&i->use))
i = NULL;
rcu_read_unlock();
return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
/* If an expectation for this connection is found, it gets deleted from
 * the global list, then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, bool unlink)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct nf_conntrack_expect *i, *exp = NULL;
unsigned int h;
if (!cnet->expect_count)
return NULL;
h = nf_ct_expect_dst_hash(net, tuple);
hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
nf_ct_exp_equal(tuple, i, zone, net)) {
exp = i;
break;
}
}
if (!exp)
return NULL;
/* If the master is not in the hash table yet (i.e. the packet hasn't
left this machine yet), how can the other end know about the expected
connection? Hence these are not the droids you are looking for (if
the master ct never got confirmed, we'd hold a reference to it
and weird things would happen to future packets). */
if (!nf_ct_is_confirmed(exp->master))
return NULL;
/* Avoid a race with another CPU that, for the exp->master ct, is
 * about to invoke ->destroy() or nf_ct_delete() via timeout
 * or early_drop().
 *
 * The refcount_inc_not_zero() check tells us: if it fails, we
 * know the ct is being destroyed. If it succeeds, we
 * can be sure the ct cannot disappear underneath us.
*/
if (unlikely(nf_ct_is_dying(exp->master) ||
!refcount_inc_not_zero(&exp->master->ct_general.use)))
return NULL;
if (exp->flags & NF_CT_EXPECT_PERMANENT || !unlink) {
refcount_inc(&exp->use);
return exp;
} else if (del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
return exp;
}
/* Undo exp->master refcnt increase, if del_timer() failed */
nf_ct_put(exp->master);
return NULL;
}
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_expect *exp;
struct hlist_node *next;
/* Optimization: most connections never expect any others. */
if (!help)
return;
spin_lock_bh(&nf_conntrack_expect_lock);
hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
nf_ct_remove_expect(exp);
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
/* Would two expectations clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
const struct nf_conntrack_expect *b)
{
/* The part covered by the intersection of the masks must be unequal,
otherwise the two expectations clash */
struct nf_conntrack_tuple_mask intersect_mask;
int count;
intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
intersect_mask.src.u3.all[count] =
a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
}
return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
static inline int expect_matches(const struct nf_conntrack_expect *a,
const struct nf_conntrack_expect *b)
{
return nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
static bool master_matches(const struct nf_conntrack_expect *a,
const struct nf_conntrack_expect *b,
unsigned int flags)
{
if (flags & NF_CT_EXP_F_SKIP_MASTER)
return true;
return a->master == b->master;
}
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
spin_lock_bh(&nf_conntrack_expect_lock);
nf_ct_remove_expect(exp);
spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
/* We don't increase the master conntrack refcount for non-fulfilled
* conntracks. During the conntrack destruction, the expectations are
* always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
struct nf_conntrack_expect *new;
new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
if (!new)
return NULL;
new->master = me;
refcount_set(&new->use, 1);
return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
u_int8_t family,
const union nf_inet_addr *saddr,
const union nf_inet_addr *daddr,
u_int8_t proto, const __be16 *src, const __be16 *dst)
{
int len;
if (family == AF_INET)
len = 4;
else
len = 16;
exp->flags = 0;
exp->class = class;
exp->expectfn = NULL;
exp->helper = NULL;
exp->tuple.src.l3num = family;
exp->tuple.dst.protonum = proto;
if (saddr) {
memcpy(&exp->tuple.src.u3, saddr, len);
if (sizeof(exp->tuple.src.u3) > len)
/* address needs to be cleared for nf_ct_tuple_equal */
memset((void *)&exp->tuple.src.u3 + len, 0x00,
sizeof(exp->tuple.src.u3) - len);
memset(&exp->mask.src.u3, 0xFF, len);
if (sizeof(exp->mask.src.u3) > len)
memset((void *)&exp->mask.src.u3 + len, 0x00,
sizeof(exp->mask.src.u3) - len);
} else {
memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
}
if (src) {
exp->tuple.src.u.all = *src;
exp->mask.src.u.all = htons(0xFFFF);
} else {
exp->tuple.src.u.all = 0;
exp->mask.src.u.all = 0;
}
memcpy(&exp->tuple.dst.u3, daddr, len);
if (sizeof(exp->tuple.dst.u3) > len)
/* address needs to be cleared for nf_ct_tuple_equal */
memset((void *)&exp->tuple.dst.u3 + len, 0x00,
sizeof(exp->tuple.dst.u3) - len);
exp->tuple.dst.u.all = *dst;
#if IS_ENABLED(CONFIG_NF_NAT)
memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
struct nf_conntrack_expect *exp;
exp = container_of(head, struct nf_conntrack_expect, rcu);
kmem_cache_free(nf_ct_expect_cachep, exp);
}
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
if (refcount_dec_and_test(&exp->use))
call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
struct nf_conntrack_net *cnet;
struct nf_conn_help *master_help = nfct_help(exp->master);
struct nf_conntrack_helper *helper;
struct net *net = nf_ct_exp_net(exp);
unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);
/* two references: one for the hash insert, one for the timer */
refcount_add(2, &exp->use);
timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
helper = rcu_dereference_protected(master_help->helper,
lockdep_is_held(&nf_conntrack_expect_lock));
if (helper) {
exp->timeout.expires = jiffies +
helper->expect_policy[exp->class].timeout * HZ;
}
add_timer(&exp->timeout);
hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
master_help->expecting[exp->class]++;
hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
cnet = nf_ct_pernet(net);
cnet->expect_count++;
NF_CT_STAT_INC(net, expect_create);
}
/* A race with expectations being used means we could find none to evict; OK. */
static void evict_oldest_expect(struct nf_conn *master,
struct nf_conntrack_expect *new)
{
struct nf_conn_help *master_help = nfct_help(master);
struct nf_conntrack_expect *exp, *last = NULL;
hlist_for_each_entry(exp, &master_help->expectations, lnode) {
if (exp->class == new->class)
last = exp;
}
if (last)
nf_ct_remove_expect(last);
}
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
unsigned int flags)
{
const struct nf_conntrack_expect_policy *p;
struct nf_conntrack_expect *i;
struct nf_conntrack_net *cnet;
struct nf_conn *master = expect->master;
struct nf_conn_help *master_help = nfct_help(master);
struct nf_conntrack_helper *helper;
struct net *net = nf_ct_exp_net(expect);
struct hlist_node *next;
unsigned int h;
int ret = 0;
if (!master_help) {
ret = -ESHUTDOWN;
goto out;
}
h = nf_ct_expect_dst_hash(net, &expect->tuple);
hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
if (master_matches(i, expect, flags) &&
expect_matches(i, expect)) {
if (i->class != expect->class ||
i->master != expect->master)
return -EALREADY;
if (nf_ct_remove_expect(i))
break;
} else if (expect_clash(i, expect)) {
ret = -EBUSY;
goto out;
}
}
/* Will this put us over the per-class expectation limit? */
helper = rcu_dereference_protected(master_help->helper,
lockdep_is_held(&nf_conntrack_expect_lock));
if (helper) {
p = &helper->expect_policy[expect->class];
if (p->max_expected &&
master_help->expecting[expect->class] >= p->max_expected) {
evict_oldest_expect(master, expect);
if (master_help->expecting[expect->class]
>= p->max_expected) {
ret = -EMFILE;
goto out;
}
}
}
cnet = nf_ct_pernet(net);
if (cnet->expect_count >= nf_ct_expect_max) {
net_warn_ratelimited("nf_conntrack: expectation table full\n");
ret = -EMFILE;
}
out:
return ret;
}
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
u32 portid, int report, unsigned int flags)
{
int ret;
spin_lock_bh(&nf_conntrack_expect_lock);
ret = __nf_ct_expect_check(expect, flags);
if (ret < 0)
goto out;
nf_ct_expect_insert(expect);
spin_unlock_bh(&nf_conntrack_expect_lock);
nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
return 0;
out:
spin_unlock_bh(&nf_conntrack_expect_lock);
return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
void *data)
{
struct nf_conntrack_expect *exp;
const struct hlist_node *next;
unsigned int i;
spin_lock_bh(&nf_conntrack_expect_lock);
for (i = 0; i < nf_ct_expect_hsize; i++) {
hlist_for_each_entry_safe(exp, next,
&nf_ct_expect_hash[i],
hnode) {
if (iter(exp, data) && del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
}
}
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy);
void nf_ct_expect_iterate_net(struct net *net,
bool (*iter)(struct nf_conntrack_expect *e, void *data),
void *data,
u32 portid, int report)
{
struct nf_conntrack_expect *exp;
const struct hlist_node *next;
unsigned int i;
spin_lock_bh(&nf_conntrack_expect_lock);
for (i = 0; i < nf_ct_expect_hsize; i++) {
hlist_for_each_entry_safe(exp, next,
&nf_ct_expect_hash[i],
hnode) {
if (!net_eq(nf_ct_exp_net(exp), net))
continue;
if (iter(exp, data) && del_timer(&exp->timeout)) {
nf_ct_unlink_expect_report(exp, portid, report);
nf_ct_expect_put(exp);
}
}
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net);
#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
struct seq_net_private p;
unsigned int bucket;
};
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
struct ct_expect_iter_state *st = seq->private;
struct hlist_node *n;
for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
if (n)
return n;
}
return NULL;
}
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
struct hlist_node *head)
{
struct ct_expect_iter_state *st = seq->private;
head = rcu_dereference(hlist_next_rcu(head));
while (head == NULL) {
if (++st->bucket >= nf_ct_expect_hsize)
return NULL;
head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
}
return head;
}
static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
struct hlist_node *head = ct_expect_get_first(seq);
if (head)
while (pos && (head = ct_expect_get_next(seq, head)))
pos--;
return pos ? NULL : head;
}
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return ct_expect_get_idx(seq, *pos);
}
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
(*pos)++;
return ct_expect_get_next(seq, v);
}
static void exp_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int exp_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_expect *expect;
struct nf_conntrack_helper *helper;
struct hlist_node *n = v;
char *delim = "";
expect = hlist_entry(n, struct nf_conntrack_expect, hnode);
if (expect->timeout.function)
seq_printf(s, "%ld ", timer_pending(&expect->timeout)
? (long)(expect->timeout.expires - jiffies)/HZ : 0);
else
seq_puts(s, "- ");
seq_printf(s, "l3proto = %u proto=%u ",
expect->tuple.src.l3num,
expect->tuple.dst.protonum);
print_tuple(s, &expect->tuple,
nf_ct_l4proto_find(expect->tuple.dst.protonum));
if (expect->flags & NF_CT_EXPECT_PERMANENT) {
seq_puts(s, "PERMANENT");
delim = ",";
}
if (expect->flags & NF_CT_EXPECT_INACTIVE) {
seq_printf(s, "%sINACTIVE", delim);
delim = ",";
}
if (expect->flags & NF_CT_EXPECT_USERSPACE)
seq_printf(s, "%sUSERSPACE", delim);
helper = rcu_dereference(nfct_help(expect->master)->helper);
if (helper) {
seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
if (helper->expect_policy[expect->class].name[0])
seq_printf(s, "/%s",
helper->expect_policy[expect->class].name);
}
seq_putc(s, '\n');
return 0;
}
static const struct seq_operations exp_seq_ops = {
.start = exp_seq_start,
.next = exp_seq_next,
.stop = exp_seq_stop,
.show = exp_seq_show
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct proc_dir_entry *proc;
kuid_t root_uid;
kgid_t root_gid;
proc = proc_create_net("nf_conntrack_expect", 0440, net->proc_net,
&exp_seq_ops, sizeof(struct ct_expect_iter_state));
if (!proc)
return -ENOMEM;
root_uid = make_kuid(net->user_ns, 0);
root_gid = make_kgid(net->user_ns, 0);
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
return 0;
}
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
int nf_conntrack_expect_pernet_init(struct net *net)
{
return exp_proc_init(net);
}
void nf_conntrack_expect_pernet_fini(struct net *net)
{
exp_proc_remove(net);
}
int nf_conntrack_expect_init(void)
{
if (!nf_ct_expect_hsize) {
nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
if (!nf_ct_expect_hsize)
nf_ct_expect_hsize = 1;
}
nf_ct_expect_max = nf_ct_expect_hsize * 4;
nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
sizeof(struct nf_conntrack_expect),
0, 0, NULL);
if (!nf_ct_expect_cachep)
return -ENOMEM;
nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
if (!nf_ct_expect_hash) {
kmem_cache_destroy(nf_ct_expect_cachep);
return -ENOMEM;
}
return 0;
}
void nf_conntrack_expect_fini(void)
{
rcu_barrier(); /* Wait for call_rcu() before destroy */
kmem_cache_destroy(nf_ct_expect_cachep);
kvfree(nf_ct_expect_hash);
}
| linux-master | net/netfilter/nf_conntrack_expect.c |
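nf_ct_expect_dst_hash() above siphashes the destination tuple and then maps the 32-bit result onto a bucket with reciprocal_scale(), which replaces "hash % size" by a multiply-and-shift that needs no division and does not require a power-of-two table size. A self-contained userspace sketch of that mapping (reciprocal_scale_sketch is a stand-in for the kernel helper):
#include <stdint.h>
#include <stdio.h>

/* map a 32-bit hash onto [0, buckets) without a modulo */
static uint32_t reciprocal_scale_sketch(uint32_t hash, uint32_t buckets)
{
	return (uint32_t)(((uint64_t)hash * buckets) >> 32);
}

int main(void)
{
	uint32_t hsize = 1024; /* assumed value of nf_ct_expect_hsize */

	/* any 32-bit hash lands inside [0, hsize) */
	printf("%u\n", reciprocal_scale_sketch(0xdeadbeef, hsize));
	printf("%u\n", reciprocal_scale_sketch(0xffffffff, hsize)); /* 1023 */
	return 0;
}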
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2006 Patrick McHardy <[email protected]>
*
* Based on ipt_random and ipt_nth by Fabrice MARIE <[email protected]>.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netfilter/xt_statistic.h>
#include <linux/netfilter/x_tables.h>
#include <linux/module.h>
struct xt_statistic_priv {
atomic_t count;
} ____cacheline_aligned_in_smp;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
MODULE_ALIAS("ipt_statistic");
MODULE_ALIAS("ip6t_statistic");
static bool
statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_statistic_info *info = par->matchinfo;
bool ret = info->flags & XT_STATISTIC_INVERT;
int nval, oval;
switch (info->mode) {
case XT_STATISTIC_MODE_RANDOM:
if ((get_random_u32() & 0x7FFFFFFF) < info->u.random.probability)
ret = !ret;
break;
case XT_STATISTIC_MODE_NTH:
do {
oval = atomic_read(&info->master->count);
nval = (oval == info->u.nth.every) ? 0 : oval + 1;
} while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
if (nval == 0)
ret = !ret;
break;
}
return ret;
}
static int statistic_mt_check(const struct xt_mtchk_param *par)
{
struct xt_statistic_info *info = par->matchinfo;
if (info->mode > XT_STATISTIC_MODE_MAX ||
info->flags & ~XT_STATISTIC_MASK)
return -EINVAL;
info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
if (info->master == NULL)
return -ENOMEM;
atomic_set(&info->master->count, info->u.nth.count);
return 0;
}
static void statistic_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_statistic_info *info = par->matchinfo;
kfree(info->master);
}
static struct xt_match xt_statistic_mt_reg __read_mostly = {
.name = "statistic",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = statistic_mt,
.checkentry = statistic_mt_check,
.destroy = statistic_mt_destroy,
.matchsize = sizeof(struct xt_statistic_info),
.usersize = offsetof(struct xt_statistic_info, master),
.me = THIS_MODULE,
};
static int __init statistic_mt_init(void)
{
return xt_register_match(&xt_statistic_mt_reg);
}
static void __exit statistic_mt_exit(void)
{
xt_unregister_match(&xt_statistic_mt_reg);
}
module_init(statistic_mt_init);
module_exit(statistic_mt_exit);
| linux-master | net/netfilter/xt_statistic.c |
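The nth mode in statistic_mt() above advances a shared counter with a lock-free compare-and-swap loop and matches exactly when the counter wraps to zero. A userspace sketch of the same loop using C11 atomics, where every == 2 makes every third call match, mirroring the kernel's "nval == 0 fires" convention (nth_match_sketch is illustrative, not kernel code):
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool nth_match_sketch(atomic_int *count, int every)
{
	int oval, nval;

	/* retry until our increment (or wrap to zero) is published */
	do {
		oval = atomic_load(count);
		nval = (oval == every) ? 0 : oval + 1;
	} while (!atomic_compare_exchange_weak(count, &oval, nval));

	return nval == 0;
}

int main(void)
{
	atomic_int count = 0;

	for (int i = 0; i < 7; i++)
		printf("pkt %d: %s\n", i,
		       nth_match_sketch(&count, 2) ? "match" : "skip");
	return 0;
}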
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 2001-2002 Magnus Boden <[email protected]>
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_tftp.h>
#define HELPER_NAME "tftp"
MODULE_AUTHOR("Magnus Boden <[email protected]>");
MODULE_DESCRIPTION("TFTP connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_tftp");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
#define MAX_PORTS 8
static unsigned short ports[MAX_PORTS];
static unsigned int ports_c;
module_param_array(ports, ushort, &ports_c, 0400);
MODULE_PARM_DESC(ports, "Port numbers of TFTP servers");
unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
struct nf_conntrack_expect *exp) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_tftp_hook);
static int tftp_help(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
const struct tftphdr *tfh;
struct tftphdr _tftph;
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
unsigned int ret = NF_ACCEPT;
typeof(nf_nat_tftp_hook) nf_nat_tftp;
tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr),
sizeof(_tftph), &_tftph);
if (tfh == NULL)
return NF_ACCEPT;
switch (ntohs(tfh->opcode)) {
case TFTP_OPCODE_READ:
case TFTP_OPCODE_WRITE:
/* RRQ and WRQ work the same way */
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
exp = nf_ct_expect_alloc(ct);
if (exp == NULL) {
nf_ct_helper_log(skb, ct, "cannot alloc expectation");
return NF_DROP;
}
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
nf_ct_l3num(ct),
&tuple->src.u3, &tuple->dst.u3,
IPPROTO_UDP, NULL, &tuple->dst.u.udp.port);
pr_debug("expect: ");
nf_ct_dump_tuple(&exp->tuple);
nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
ret = nf_nat_tftp(skb, ctinfo, exp);
else if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
}
nf_ct_expect_put(exp);
break;
case TFTP_OPCODE_DATA:
case TFTP_OPCODE_ACK:
pr_debug("Data/ACK opcode\n");
break;
case TFTP_OPCODE_ERROR:
pr_debug("Error opcode\n");
break;
default:
pr_debug("Unknown opcode\n");
}
return ret;
}
static struct nf_conntrack_helper tftp[MAX_PORTS * 2] __read_mostly;
static const struct nf_conntrack_expect_policy tftp_exp_policy = {
.max_expected = 1,
.timeout = 5 * 60,
};
static void __exit nf_conntrack_tftp_fini(void)
{
nf_conntrack_helpers_unregister(tftp, ports_c * 2);
}
static int __init nf_conntrack_tftp_init(void)
{
int i, ret;
NF_CT_HELPER_BUILD_BUG_ON(0);
if (ports_c == 0)
ports[ports_c++] = TFTP_PORT;
for (i = 0; i < ports_c; i++) {
nf_ct_helper_init(&tftp[2 * i], AF_INET, IPPROTO_UDP,
HELPER_NAME, TFTP_PORT, ports[i], i,
&tftp_exp_policy, 0, tftp_help, NULL,
THIS_MODULE);
nf_ct_helper_init(&tftp[2 * i + 1], AF_INET6, IPPROTO_UDP,
HELPER_NAME, TFTP_PORT, ports[i], i,
&tftp_exp_policy, 0, tftp_help, NULL,
THIS_MODULE);
}
ret = nf_conntrack_helpers_register(tftp, ports_c * 2);
if (ret < 0) {
pr_err("failed to register helpers\n");
return ret;
}
return 0;
}
module_init(nf_conntrack_tftp_init);
module_exit(nf_conntrack_tftp_fini);
| linux-master | net/netfilter/nf_conntrack_tftp.c |
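tftp_help() above only creates an expectation for RRQ and WRQ requests, which it identifies from the big-endian opcode in the first two bytes of the TFTP payload; the server answers a request from a fresh source port, which is why the reply flow must be expected. A userspace sketch of that opcode check (tftp_needs_expect_sketch and the sample packet are illustrative only):
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool tftp_needs_expect_sketch(const uint8_t *payload, size_t len)
{
	uint16_t opcode;

	if (len < sizeof(opcode))
		return false;
	memcpy(&opcode, payload, sizeof(opcode));
	opcode = ntohs(opcode); /* TFTP opcode is big-endian on the wire */
	return opcode == 1 /* RRQ */ || opcode == 2 /* WRQ */;
}

int main(void)
{
	const uint8_t rrq[] = { 0x00, 0x01, 'f', 0, 'o', 0 };

	printf("%s\n", tftp_needs_expect_sketch(rrq, sizeof(rrq)) ?
	       "expect" : "ignore");
	return 0;
}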
/* netfilter.c: look after the filters for various protocols.
* Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
*
* Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
* way.
*
* This code is GPL.
*/
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>
#include "nf_internals.h"
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);
DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);
#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif
static DEFINE_MUTEX(nf_hook_mutex);
/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT 1024
#define nf_entry_dereference(e) \
rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
struct nf_hook_entries *e;
size_t alloc = sizeof(*e) +
sizeof(struct nf_hook_entry) * num +
sizeof(struct nf_hook_ops *) * num +
sizeof(struct nf_hook_entries_rcu_head);
if (num == 0)
return NULL;
e = kvzalloc(alloc, GFP_KERNEL_ACCOUNT);
if (e)
e->num_hook_entries = num;
return e;
}
static void __nf_hook_entries_free(struct rcu_head *h)
{
struct nf_hook_entries_rcu_head *head;
head = container_of(h, struct nf_hook_entries_rcu_head, head);
kvfree(head->allocation);
}
static void nf_hook_entries_free(struct nf_hook_entries *e)
{
struct nf_hook_entries_rcu_head *head;
struct nf_hook_ops **ops;
unsigned int num;
if (!e)
return;
num = e->num_hook_entries;
ops = nf_hook_entries_get_hook_ops(e);
head = (void *)&ops[num];
head->allocation = e;
call_rcu(&head->head, __nf_hook_entries_free);
}
static unsigned int accept_all(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}
static const struct nf_hook_ops dummy_ops = {
.hook = accept_all,
.priority = INT_MIN,
};
static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
const struct nf_hook_ops *reg)
{
unsigned int i, alloc_entries, nhooks, old_entries;
struct nf_hook_ops **orig_ops = NULL;
struct nf_hook_ops **new_ops;
struct nf_hook_entries *new;
bool inserted = false;
alloc_entries = 1;
old_entries = old ? old->num_hook_entries : 0;
if (old) {
orig_ops = nf_hook_entries_get_hook_ops(old);
for (i = 0; i < old_entries; i++) {
if (orig_ops[i] != &dummy_ops)
alloc_entries++;
/* Restrict BPF hook type to force a unique priority, not
* shared at attach time.
*
* This is mainly to avoid ordering issues between two
* different bpf programs; it doesn't prevent a normal
* hook at the same priority as a bpf one (we don't want to
* prevent defrag, conntrack, iptables etc. from attaching).
*/
if (reg->priority == orig_ops[i]->priority &&
reg->hook_ops_type == NF_HOOK_OP_BPF)
return ERR_PTR(-EBUSY);
}
}
if (alloc_entries > MAX_HOOK_COUNT)
return ERR_PTR(-E2BIG);
new = allocate_hook_entries_size(alloc_entries);
if (!new)
return ERR_PTR(-ENOMEM);
new_ops = nf_hook_entries_get_hook_ops(new);
i = 0;
nhooks = 0;
while (i < old_entries) {
if (orig_ops[i] == &dummy_ops) {
++i;
continue;
}
if (inserted || reg->priority > orig_ops[i]->priority) {
new_ops[nhooks] = (void *)orig_ops[i];
new->hooks[nhooks] = old->hooks[i];
i++;
} else {
new_ops[nhooks] = (void *)reg;
new->hooks[nhooks].hook = reg->hook;
new->hooks[nhooks].priv = reg->priv;
inserted = true;
}
nhooks++;
}
if (!inserted) {
new_ops[nhooks] = (void *)reg;
new->hooks[nhooks].hook = reg->hook;
new->hooks[nhooks].priv = reg->priv;
}
return new;
}
static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
struct nf_hook_ops **orig_ops;
int prio = INT_MIN;
size_t i = 0;
orig_ops = nf_hook_entries_get_hook_ops(hooks);
for (i = 0; i < hooks->num_hook_entries; i++) {
if (orig_ops[i] == &dummy_ops)
continue;
WARN_ON(orig_ops[i]->priority < prio);
if (orig_ops[i]->priority > prio)
prio = orig_ops[i]->priority;
}
#endif
}
int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
const struct nf_hook_ops *reg)
{
struct nf_hook_entries *new_hooks;
struct nf_hook_entries *p;
p = rcu_dereference_raw(*pp);
new_hooks = nf_hook_entries_grow(p, reg);
if (IS_ERR(new_hooks))
return PTR_ERR(new_hooks);
hooks_validate(new_hooks);
rcu_assign_pointer(*pp, new_hooks);
BUG_ON(p == new_hooks);
nf_hook_entries_free(p);
return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);
/*
* __nf_hook_entries_try_shrink - try to shrink hook array
*
* @old: current hook blob at @pp
* @pp: location of hook blob
*
* Hook unregistration must always succeed, so to-be-removed hooks
* are replaced by a dummy one that will just move to the next hook.
*
* This counts the current dummy hooks, attempts to allocate a new blob,
* copies the live hooks, then replaces and discards the old one.
*
* Returns the address to free, or NULL if nothing needs freeing.
*/
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
struct nf_hook_entries __rcu **pp)
{
unsigned int i, j, skip = 0, hook_entries;
struct nf_hook_entries *new = NULL;
struct nf_hook_ops **orig_ops;
struct nf_hook_ops **new_ops;
if (WARN_ON_ONCE(!old))
return NULL;
orig_ops = nf_hook_entries_get_hook_ops(old);
for (i = 0; i < old->num_hook_entries; i++) {
if (orig_ops[i] == &dummy_ops)
skip++;
}
/* if skip == hook_entries all hooks have been removed */
hook_entries = old->num_hook_entries;
if (skip == hook_entries)
goto out_assign;
if (skip == 0)
return NULL;
hook_entries -= skip;
new = allocate_hook_entries_size(hook_entries);
if (!new)
return NULL;
new_ops = nf_hook_entries_get_hook_ops(new);
for (i = 0, j = 0; i < old->num_hook_entries; i++) {
if (orig_ops[i] == &dummy_ops)
continue;
new->hooks[j] = old->hooks[i];
new_ops[j] = (void *)orig_ops[i];
j++;
}
hooks_validate(new);
out_assign:
rcu_assign_pointer(*pp, new);
return old;
}
static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
struct net_device *dev)
{
switch (pf) {
case NFPROTO_NETDEV:
break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
case NFPROTO_ARP:
if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
return NULL;
return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
case NFPROTO_BRIDGE:
if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
return NULL;
return net->nf.hooks_bridge + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
case NFPROTO_INET:
if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
return NULL;
if (!dev || dev_net(dev) != net) {
WARN_ON_ONCE(1);
return NULL;
}
return &dev->nf_hooks_ingress;
#endif
case NFPROTO_IPV4:
if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
return NULL;
return net->nf.hooks_ipv4 + hooknum;
case NFPROTO_IPV6:
if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
return NULL;
return net->nf.hooks_ipv6 + hooknum;
default:
WARN_ON_ONCE(1);
return NULL;
}
#ifdef CONFIG_NETFILTER_INGRESS
if (hooknum == NF_NETDEV_INGRESS) {
if (dev && dev_net(dev) == net)
return &dev->nf_hooks_ingress;
}
#endif
#ifdef CONFIG_NETFILTER_EGRESS
if (hooknum == NF_NETDEV_EGRESS) {
if (dev && dev_net(dev) == net)
return &dev->nf_hooks_egress;
}
#endif
WARN_ON_ONCE(1);
return NULL;
}
static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
if (reg->hooknum == hooknum)
return -EOPNOTSUPP;
#endif
if (reg->hooknum != hooknum ||
!reg->dev || dev_net(reg->dev) != net)
return -EINVAL;
return 0;
}
static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg,
int pf)
{
if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
(pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
return true;
return false;
}
static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg,
int pf)
{
return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS;
}
static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
int hooknum;
if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
pf = NFPROTO_NETDEV;
hooknum = NF_NETDEV_INGRESS;
} else {
hooknum = reg->hooknum;
}
static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}
static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
int hooknum;
if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
pf = NFPROTO_NETDEV;
hooknum = NF_NETDEV_INGRESS;
} else {
hooknum = reg->hooknum;
}
static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}
static int __nf_register_net_hook(struct net *net, int pf,
const struct nf_hook_ops *reg)
{
struct nf_hook_entries *p, *new_hooks;
struct nf_hook_entries __rcu **pp;
int err;
switch (pf) {
case NFPROTO_NETDEV:
#ifndef CONFIG_NETFILTER_INGRESS
if (reg->hooknum == NF_NETDEV_INGRESS)
return -EOPNOTSUPP;
#endif
#ifndef CONFIG_NETFILTER_EGRESS
if (reg->hooknum == NF_NETDEV_EGRESS)
return -EOPNOTSUPP;
#endif
if ((reg->hooknum != NF_NETDEV_INGRESS &&
reg->hooknum != NF_NETDEV_EGRESS) ||
!reg->dev || dev_net(reg->dev) != net)
return -EINVAL;
break;
case NFPROTO_INET:
if (reg->hooknum != NF_INET_INGRESS)
break;
err = nf_ingress_check(net, reg, NF_INET_INGRESS);
if (err < 0)
return err;
break;
}
pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
if (!pp)
return -EINVAL;
mutex_lock(&nf_hook_mutex);
p = nf_entry_dereference(*pp);
new_hooks = nf_hook_entries_grow(p, reg);
if (!IS_ERR(new_hooks)) {
hooks_validate(new_hooks);
rcu_assign_pointer(*pp, new_hooks);
}
mutex_unlock(&nf_hook_mutex);
if (IS_ERR(new_hooks))
return PTR_ERR(new_hooks);
#ifdef CONFIG_NETFILTER_INGRESS
if (nf_ingress_hook(reg, pf))
net_inc_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
if (nf_egress_hook(reg, pf))
net_inc_egress_queue();
#endif
nf_static_key_inc(reg, pf);
BUG_ON(p == new_hooks);
nf_hook_entries_free(p);
return 0;
}
/*
* nf_remove_net_hook - remove a hook from blob
*
* @old: current hook blob
* @unreg: hook to unregister
*
* This cannot fail, hook unregistration must always succeed.
* Therefore replace the to-be-removed hook with a dummy hook.
*/
static bool nf_remove_net_hook(struct nf_hook_entries *old,
const struct nf_hook_ops *unreg)
{
struct nf_hook_ops **orig_ops;
unsigned int i;
orig_ops = nf_hook_entries_get_hook_ops(old);
for (i = 0; i < old->num_hook_entries; i++) {
if (orig_ops[i] != unreg)
continue;
WRITE_ONCE(old->hooks[i].hook, accept_all);
WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
return true;
}
return false;
}
static void __nf_unregister_net_hook(struct net *net, int pf,
const struct nf_hook_ops *reg)
{
struct nf_hook_entries __rcu **pp;
struct nf_hook_entries *p;
pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
if (!pp)
return;
mutex_lock(&nf_hook_mutex);
p = nf_entry_dereference(*pp);
if (WARN_ON_ONCE(!p)) {
mutex_unlock(&nf_hook_mutex);
return;
}
if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
if (nf_ingress_hook(reg, pf))
net_dec_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
if (nf_egress_hook(reg, pf))
net_dec_egress_queue();
#endif
nf_static_key_dec(reg, pf);
} else {
WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
}
p = __nf_hook_entries_try_shrink(p, pp);
mutex_unlock(&nf_hook_mutex);
if (!p)
return;
nf_queue_nf_hook_drop(net);
nf_hook_entries_free(p);
}
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
if (reg->pf == NFPROTO_INET) {
if (reg->hooknum == NF_INET_INGRESS) {
__nf_unregister_net_hook(net, NFPROTO_INET, reg);
} else {
__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
}
} else {
__nf_unregister_net_hook(net, reg->pf, reg);
}
}
EXPORT_SYMBOL(nf_unregister_net_hook);
void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
const struct nf_hook_ops *reg)
{
struct nf_hook_entries *p;
p = rcu_dereference_raw(*pp);
if (nf_remove_net_hook(p, reg)) {
p = __nf_hook_entries_try_shrink(p, pp);
nf_hook_entries_free(p);
}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
int err;
if (reg->pf == NFPROTO_INET) {
if (reg->hooknum == NF_INET_INGRESS) {
err = __nf_register_net_hook(net, NFPROTO_INET, reg);
if (err < 0)
return err;
} else {
err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
if (err < 0)
return err;
err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
if (err < 0) {
__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
return err;
}
}
} else {
err = __nf_register_net_hook(net, reg->pf, reg);
if (err < 0)
return err;
}
return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = nf_register_net_hook(net, ®[i]);
if (err)
goto err;
}
return err;
err:
if (i > 0)
nf_unregister_net_hooks(net, reg, i);
return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int hookcount)
{
unsigned int i;
for (i = 0; i < hookcount; i++)
nf_unregister_net_hook(net, ®[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);
/* Returns 1 if okfn() needs to be executed by the caller,
* -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
const struct nf_hook_entries *e, unsigned int s)
{
unsigned int verdict;
int ret;
for (; s < e->num_hook_entries; s++) {
verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
break;
case NF_DROP:
kfree_skb_reason(skb,
SKB_DROP_REASON_NETFILTER_DROP);
ret = NF_DROP_GETERR(verdict);
if (ret == 0)
ret = -EPERM;
return ret;
case NF_QUEUE:
ret = nf_queue(skb, state, s, verdict);
if (ret == 1)
continue;
return ret;
default:
/* Implicit handling for NF_STOLEN, as well as any other
* non-conventional verdicts.
*/
return 0;
}
}
return 1;
}
EXPORT_SYMBOL(nf_hook_slow);
void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
const struct nf_hook_entries *e)
{
struct sk_buff *skb, *next;
struct list_head sublist;
int ret;
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
skb_list_del_init(skb);
ret = nf_hook_slow(skb, state, e, 0);
if (ret == 1)
list_add_tail(&skb->list, &sublist);
}
/* Put passed packets back on main list */
list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);
/* This needs to be compiled in any case to avoid dependencies between the
* nfnetlink_queue code and nf_conntrack.
*/
const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);
const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);
const struct nf_defrag_hook __rcu *nf_defrag_v4_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_defrag_v4_hook);
const struct nf_defrag_hook __rcu *nf_defrag_v6_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_defrag_v6_hook);
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
u8 nf_ctnetlink_has_listener;
EXPORT_SYMBOL_GPL(nf_ctnetlink_has_listener);
const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);
/* This does not belong here, but locally generated errors need it if connection
* tracking is in use: without this, the connection may not be in the hash table,
* and hence manufactured ICMP or RST packets will not be associated with it.
*/
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
const struct nf_ct_hook *ct_hook;
if (skb->_nfct) {
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
if (ct_hook)
ct_hook->attach(new, skb);
rcu_read_unlock();
}
}
EXPORT_SYMBOL(nf_ct_attach);
void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
const struct nf_ct_hook *ct_hook;
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
if (ct_hook)
ct_hook->destroy(nfct);
rcu_read_unlock();
WARN_ON(!ct_hook);
}
EXPORT_SYMBOL(nf_conntrack_destroy);
void nf_ct_set_closing(struct nf_conntrack *nfct)
{
const struct nf_ct_hook *ct_hook;
if (!nfct)
return;
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
if (ct_hook)
ct_hook->set_closing(nfct);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_set_closing);
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
{
const struct nf_ct_hook *ct_hook;
bool ret = false;
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
if (ct_hook)
ret = ct_hook->get_tuple_skb(dst_tuple, skb);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);
/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
.id = NF_CT_DEFAULT_ZONE_ID,
.dir = NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */
static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
int h;
for (h = 0; h < max; h++)
RCU_INIT_POINTER(e[h], NULL);
}
static int __net_init netfilter_net_init(struct net *net)
{
__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#ifdef CONFIG_PROC_FS
net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
net->proc_net);
if (!net->nf.proc_netfilter) {
if (!net_eq(net, &init_net))
pr_err("cannot create netfilter proc entry");
return -ENOMEM;
}
#endif
return 0;
}
static void __net_exit netfilter_net_exit(struct net *net)
{
remove_proc_entry("netfilter", net->proc_net);
}
static struct pernet_operations netfilter_net_ops = {
.init = netfilter_net_init,
.exit = netfilter_net_exit,
};
int __init netfilter_init(void)
{
int ret;
ret = register_pernet_subsys(&netfilter_net_ops);
if (ret < 0)
goto err;
ret = netfilter_log_init();
if (ret < 0)
goto err_pernet;
return 0;
err_pernet:
unregister_pernet_subsys(&netfilter_net_ops);
err:
return ret;
}
| linux-master | net/netfilter/core.c |
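The registration API implemented above is consumed by modules roughly as follows: fill in a struct nf_hook_ops, register it against a namespace, and return a verdict from the hook function. A minimal sketch of such a consumer, assuming an accept-all hook on IPv4 PRE_ROUTING in the initial namespace (the demo_* names are illustrative):
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

static unsigned int demo_hookfn(void *priv, struct sk_buff *skb,
				const struct nf_hook_state *state)
{
	/* NF_ACCEPT lets nf_hook_slow() move on to the next hook entry */
	return NF_ACCEPT;
}

static struct nf_hook_ops demo_ops = {
	.hook		= demo_hookfn,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,
};

static int __init demo_init(void)
{
	return nf_register_net_hook(&init_net, &demo_ops);
}

static void __exit demo_exit(void)
{
	nf_unregister_net_hook(&init_net, &demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");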
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match the bridge in and out ports (physical
* devices) for IP packets passing through a bridge. */
/* (C) 2001-2003 Bart De Schuymer <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/x_tables.h>
#include <uapi/linux/netfilter/xt_physdev.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <[email protected]>");
MODULE_DESCRIPTION("Xtables: Bridge physical device match");
MODULE_ALIAS("ipt_physdev");
MODULE_ALIAS("ip6t_physdev");
static bool
physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
const struct net_device *physdev;
unsigned long ret;
const char *indev, *outdev;
/* Not a bridged IP packet or no info available yet:
* LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
* the destination device will be a bridge. */
if (!nf_bridge_info_exists(skb)) {
/* Return MATCH if the invert flags of the used options are on */
if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
!(info->invert & XT_PHYSDEV_OP_BRIDGED))
return false;
if ((info->bitmask & XT_PHYSDEV_OP_ISIN) &&
!(info->invert & XT_PHYSDEV_OP_ISIN))
return false;
if ((info->bitmask & XT_PHYSDEV_OP_ISOUT) &&
!(info->invert & XT_PHYSDEV_OP_ISOUT))
return false;
if ((info->bitmask & XT_PHYSDEV_OP_IN) &&
!(info->invert & XT_PHYSDEV_OP_IN))
return false;
if ((info->bitmask & XT_PHYSDEV_OP_OUT) &&
!(info->invert & XT_PHYSDEV_OP_OUT))
return false;
return true;
}
physdev = nf_bridge_get_physoutdev(skb);
outdev = physdev ? physdev->name : NULL;
/* This only makes sense in the FORWARD and POSTROUTING chains */
if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
(!!outdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
return false;
physdev = nf_bridge_get_physindev(skb);
indev = physdev ? physdev->name : NULL;
if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
(!indev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) ||
(info->bitmask & XT_PHYSDEV_OP_ISOUT &&
(!outdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT))))
return false;
if (!(info->bitmask & XT_PHYSDEV_OP_IN))
goto match_outdev;
if (indev) {
ret = ifname_compare_aligned(indev, info->physindev,
info->in_mask);
if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
return false;
}
match_outdev:
if (!(info->bitmask & XT_PHYSDEV_OP_OUT))
return true;
if (!outdev)
return false;
ret = ifname_compare_aligned(outdev, info->physoutdev, info->out_mask);
return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
}
static int physdev_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
static bool brnf_probed __read_mostly;
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
return -EINVAL;
if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
(!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
return -EINVAL;
}
if (!brnf_probed) {
brnf_probed = true;
request_module("br_netfilter");
}
return 0;
}
static struct xt_match physdev_mt_reg __read_mostly = {
.name = "physdev",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = physdev_mt_check,
.match = physdev_mt,
.matchsize = sizeof(struct xt_physdev_info),
.me = THIS_MODULE,
};
static int __init physdev_mt_init(void)
{
return xt_register_match(&physdev_mt_reg);
}
static void __exit physdev_mt_exit(void)
{
xt_unregister_match(&physdev_mt_reg);
}
module_init(physdev_mt_init);
module_exit(physdev_mt_exit);
| linux-master | net/netfilter/xt_physdev.c |
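physdev_mt() above compares interface names under a user-supplied mask (via ifname_compare_aligned), which is how iptables encodes prefix wildcards such as "eth+". A simplified userspace sketch of a byte-wise masked name compare, assuming a 16-byte name buffer (ifname_match_sketch is illustrative and ignores the kernel's word-at-a-time optimization):
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ_SKETCH 16

static bool ifname_match_sketch(const char *dev, const char *pattern,
				const unsigned char *mask)
{
	for (int i = 0; i < IFNAMSIZ_SKETCH; i++) {
		/* a byte only counts where the mask byte is set */
		if ((dev[i] ^ pattern[i]) & mask[i])
			return false;
		if (!pattern[i])
			break;
	}
	return true;
}

int main(void)
{
	unsigned char mask[IFNAMSIZ_SKETCH];
	char pat[IFNAMSIZ_SKETCH] = "eth";

	memset(mask, 0xff, 3);			/* only the "eth" prefix matters */
	memset(mask + 3, 0, IFNAMSIZ_SKETCH - 3);
	printf("%d\n", ifname_match_sketch("eth0", pat, mask));  /* 1 */
	printf("%d\n", ifname_match_sketch("wlan0", pat, mask)); /* 0 */
	return 0;
}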
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/if.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
#include <linux/netfilter/nfnetlink_osf.h>
/*
* Indexed by the don't-fragment (DF) bit.
* It is the only constant value in the fingerprint.
*/
struct list_head nf_osf_fingers[2];
EXPORT_SYMBOL_GPL(nf_osf_fingers);
static inline int nf_osf_ttl(const struct sk_buff *skb,
int ttl_check, unsigned char f_ttl)
{
struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
const struct iphdr *ip = ip_hdr(skb);
const struct in_ifaddr *ifa;
int ret = 0;
if (ttl_check == NF_OSF_TTL_TRUE)
return ip->ttl == f_ttl;
if (ttl_check == NF_OSF_TTL_NOCHECK)
return 1;
else if (ip->ttl <= f_ttl)
return 1;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
if (inet_ifa_match(ip->saddr, ifa)) {
ret = (ip->ttl == f_ttl);
break;
}
}
return ret;
}
struct nf_osf_hdr_ctx {
bool df;
u16 window;
u16 totlen;
const unsigned char *optp;
unsigned int optsize;
};
static bool nf_osf_match_one(const struct sk_buff *skb,
const struct nf_osf_user_finger *f,
int ttl_check,
struct nf_osf_hdr_ctx *ctx)
{
const __u8 *optpinit = ctx->optp;
unsigned int check_WSS = 0;
int fmatch = FMATCH_WRONG;
int foptsize, optnum;
u16 mss = 0;
if (ctx->totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl))
return false;
/*
* Should not happen if the userspace parser was written correctly.
*/
if (f->wss.wc >= OSF_WSS_MAX)
return false;
/* Check options */
foptsize = 0;
for (optnum = 0; optnum < f->opt_num; ++optnum)
foptsize += f->opt[optnum].length;
if (foptsize > MAX_IPOPTLEN ||
ctx->optsize > MAX_IPOPTLEN ||
ctx->optsize != foptsize)
return false;
check_WSS = f->wss.wc;
for (optnum = 0; optnum < f->opt_num; ++optnum) {
if (f->opt[optnum].kind == *ctx->optp) {
__u32 len = f->opt[optnum].length;
const __u8 *optend = ctx->optp + len;
fmatch = FMATCH_OK;
switch (*ctx->optp) {
case OSFOPT_MSS:
mss = ctx->optp[3];
mss <<= 8;
mss |= ctx->optp[2];
mss = ntohs((__force __be16)mss);
break;
case OSFOPT_TS:
break;
}
ctx->optp = optend;
} else
fmatch = FMATCH_OPT_WRONG;
if (fmatch != FMATCH_OK)
break;
}
if (fmatch != FMATCH_OPT_WRONG) {
fmatch = FMATCH_WRONG;
switch (check_WSS) {
case OSF_WSS_PLAIN:
if (f->wss.val == 0 || ctx->window == f->wss.val)
fmatch = FMATCH_OK;
break;
case OSF_WSS_MSS:
/*
* Some smart modems mangle (decrease) the MSS to
* SMART_MSS_2, so we check the standard, the decreased
* and the fingerprint-provided MSS values.
*/
#define SMART_MSS_1 1460
#define SMART_MSS_2 1448
if (ctx->window == f->wss.val * mss ||
ctx->window == f->wss.val * SMART_MSS_1 ||
ctx->window == f->wss.val * SMART_MSS_2)
fmatch = FMATCH_OK;
break;
case OSF_WSS_MTU:
if (ctx->window == f->wss.val * (mss + 40) ||
ctx->window == f->wss.val * (SMART_MSS_1 + 40) ||
ctx->window == f->wss.val * (SMART_MSS_2 + 40))
fmatch = FMATCH_OK;
break;
case OSF_WSS_MODULO:
if ((ctx->window % f->wss.val) == 0)
fmatch = FMATCH_OK;
break;
}
}
if (fmatch != FMATCH_OK)
ctx->optp = optpinit;
return fmatch == FMATCH_OK;
}
static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
const struct sk_buff *skb,
const struct iphdr *ip,
unsigned char *opts,
struct tcphdr *_tcph)
{
const struct tcphdr *tcp;
tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
if (!tcp)
return NULL;
if (!tcp->syn)
return NULL;
ctx->totlen = ntohs(ip->tot_len);
ctx->df = ntohs(ip->frag_off) & IP_DF;
ctx->window = ntohs(tcp->window);
if (tcp->doff * 4 > sizeof(struct tcphdr)) {
ctx->optsize = tcp->doff * 4 - sizeof(struct tcphdr);
ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
sizeof(struct tcphdr), ctx->optsize, opts);
if (!ctx->optp)
return NULL;
}
return tcp;
}
bool
nf_osf_match(const struct sk_buff *skb, u_int8_t family,
int hooknum, struct net_device *in, struct net_device *out,
const struct nf_osf_info *info, struct net *net,
const struct list_head *nf_osf_fingers)
{
const struct iphdr *ip = ip_hdr(skb);
const struct nf_osf_user_finger *f;
unsigned char opts[MAX_IPOPTLEN];
const struct nf_osf_finger *kf;
int fcount = 0, ttl_check;
int fmatch = FMATCH_WRONG;
struct nf_osf_hdr_ctx ctx;
const struct tcphdr *tcp;
struct tcphdr _tcph;
memset(&ctx, 0, sizeof(ctx));
tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
if (!tcp)
return false;
ttl_check = (info->flags & NF_OSF_TTL) ? info->ttl : 0;
list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) {
f = &kf->finger;
if (!(info->flags & NF_OSF_LOG) && strcmp(info->genre, f->genre))
continue;
if (!nf_osf_match_one(skb, f, ttl_check, &ctx))
continue;
fmatch = FMATCH_OK;
fcount++;
if (info->flags & NF_OSF_LOG)
nf_log_packet(net, family, hooknum, skb,
in, out, NULL,
"%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
f->genre, f->version, f->subtype,
&ip->saddr, ntohs(tcp->source),
&ip->daddr, ntohs(tcp->dest),
f->ttl - ip->ttl);
if ((info->flags & NF_OSF_LOG) &&
info->loglevel == NF_OSF_LOGLEVEL_FIRST)
break;
}
if (!fcount && (info->flags & NF_OSF_LOG))
nf_log_packet(net, family, hooknum, skb, in, out, NULL,
"Remote OS is not known: %pI4:%u -> %pI4:%u\n",
&ip->saddr, ntohs(tcp->source),
&ip->daddr, ntohs(tcp->dest));
if (fcount)
fmatch = FMATCH_OK;
return fmatch == FMATCH_OK;
}
EXPORT_SYMBOL_GPL(nf_osf_match);
bool nf_osf_find(const struct sk_buff *skb,
const struct list_head *nf_osf_fingers,
const int ttl_check, struct nf_osf_data *data)
{
const struct iphdr *ip = ip_hdr(skb);
const struct nf_osf_user_finger *f;
unsigned char opts[MAX_IPOPTLEN];
const struct nf_osf_finger *kf;
struct nf_osf_hdr_ctx ctx;
const struct tcphdr *tcp;
struct tcphdr _tcph;
bool found = false;
memset(&ctx, 0, sizeof(ctx));
tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
if (!tcp)
return false;
list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) {
f = &kf->finger;
if (!nf_osf_match_one(skb, f, ttl_check, &ctx))
continue;
data->genre = f->genre;
data->version = f->version;
found = true;
break;
}
return found;
}
EXPORT_SYMBOL_GPL(nf_osf_find);
static const struct nla_policy nfnl_osf_policy[OSF_ATTR_MAX + 1] = {
[OSF_ATTR_FINGER] = { .len = sizeof(struct nf_osf_user_finger) },
};
static int nfnl_osf_add_callback(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const osf_attrs[])
{
struct nf_osf_user_finger *f;
struct nf_osf_finger *kf = NULL, *sf;
int err = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!osf_attrs[OSF_ATTR_FINGER])
return -EINVAL;
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
return -EINVAL;
f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
if (f->opt_num > ARRAY_SIZE(f->opt))
return -EINVAL;
if (!memchr(f->genre, 0, MAXGENRELEN) ||
!memchr(f->subtype, 0, MAXGENRELEN) ||
!memchr(f->version, 0, MAXGENRELEN))
return -EINVAL;
kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL);
if (!kf)
return -ENOMEM;
memcpy(&kf->finger, f, sizeof(struct nf_osf_user_finger));
list_for_each_entry(sf, &nf_osf_fingers[!!f->df], finger_entry) {
if (memcmp(&sf->finger, f, sizeof(struct nf_osf_user_finger)))
continue;
kfree(kf);
kf = NULL;
if (info->nlh->nlmsg_flags & NLM_F_EXCL)
err = -EEXIST;
break;
}
/*
* We are protected by the nfnl mutex.
*/
if (kf)
list_add_tail_rcu(&kf->finger_entry, &nf_osf_fingers[!!f->df]);
return err;
}
static int nfnl_osf_remove_callback(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const osf_attrs[])
{
struct nf_osf_user_finger *f;
struct nf_osf_finger *sf;
int err = -ENOENT;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!osf_attrs[OSF_ATTR_FINGER])
return -EINVAL;
f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
list_for_each_entry(sf, &nf_osf_fingers[!!f->df], finger_entry) {
if (memcmp(&sf->finger, f, sizeof(struct nf_osf_user_finger)))
continue;
/*
* We are protected by the nfnl mutex.
*/
list_del_rcu(&sf->finger_entry);
kfree_rcu(sf, rcu_head);
err = 0;
break;
}
return err;
}
static const struct nfnl_callback nfnl_osf_callbacks[OSF_MSG_MAX] = {
[OSF_MSG_ADD] = {
.call = nfnl_osf_add_callback,
.type = NFNL_CB_MUTEX,
.attr_count = OSF_ATTR_MAX,
.policy = nfnl_osf_policy,
},
[OSF_MSG_REMOVE] = {
.call = nfnl_osf_remove_callback,
.type = NFNL_CB_MUTEX,
.attr_count = OSF_ATTR_MAX,
.policy = nfnl_osf_policy,
},
};
static const struct nfnetlink_subsystem nfnl_osf_subsys = {
.name = "osf",
.subsys_id = NFNL_SUBSYS_OSF,
.cb_count = OSF_MSG_MAX,
.cb = nfnl_osf_callbacks,
};
static int __init nfnl_osf_init(void)
{
int err = -EINVAL;
int i;
for (i = 0; i < ARRAY_SIZE(nf_osf_fingers); ++i)
INIT_LIST_HEAD(&nf_osf_fingers[i]);
err = nfnetlink_subsys_register(&nfnl_osf_subsys);
if (err < 0) {
pr_err("Failed to register OSF nsfnetlink helper (%d)\n", err);
goto err_out_exit;
}
return 0;
err_out_exit:
return err;
}
static void __exit nfnl_osf_fini(void)
{
struct nf_osf_finger *f;
int i;
nfnetlink_subsys_unregister(&nfnl_osf_subsys);
rcu_read_lock();
for (i = 0; i < ARRAY_SIZE(nf_osf_fingers); ++i) {
list_for_each_entry_rcu(f, &nf_osf_fingers[i], finger_entry) {
list_del_rcu(&f->finger_entry);
kfree_rcu(f, rcu_head);
}
}
rcu_read_unlock();
rcu_barrier();
}
module_init(nfnl_osf_init);
module_exit(nfnl_osf_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
| linux-master | net/netfilter/nfnetlink_osf.c |
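nf_osf_match_one() above extracts the MSS from the TCP options so the OSF_WSS_MSS and OSF_WSS_MTU heuristics can relate it to the advertised window. A userspace sketch of pulling the value out of a standalone MSS option (kind 2, length 4, big-endian value; tcp_opt_mss_sketch is illustrative):
#include <stdint.h>
#include <stdio.h>

static int tcp_opt_mss_sketch(const uint8_t *opt, unsigned int len,
			      uint16_t *mss)
{
	if (len < 4 || opt[0] != 2 /* TCPOPT_MAXSEG */ || opt[1] != 4)
		return -1;
	*mss = (uint16_t)(opt[2] << 8 | opt[3]); /* big-endian value */
	return 0;
}

int main(void)
{
	const uint8_t mss_opt[] = { 2, 4, 0x05, 0xb4 }; /* MSS 1460 */
	uint16_t mss;

	if (!tcp_opt_mss_sketch(mss_opt, sizeof(mss_opt), &mss))
		printf("mss=%u\n", mss); /* prints mss=1460 */
	return 0;
}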
// SPDX-License-Identifier: GPL-2.0-only
#if IS_ENABLED(CONFIG_NFT_CT)
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack.h>
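/* Fast-path evaluation for the most common conntrack keys. Anything
 * this helper cannot resolve (no conntrack entry, or a key outside
 * the fast set) terminates rule evaluation with NFT_BREAK.
 */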
void nft_ct_get_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
u32 *dest = &regs->data[priv->dreg];
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int state;
ct = nf_ct_get(pkt->skb, &ctinfo);
switch (priv->key) {
case NFT_CT_STATE:
if (ct)
state = NF_CT_STATE_BIT(ctinfo);
else if (ctinfo == IP_CT_UNTRACKED)
state = NF_CT_STATE_UNTRACKED_BIT;
else
state = NF_CT_STATE_INVALID_BIT;
*dest = state;
return;
default:
break;
}
if (!ct) {
regs->verdict.code = NFT_BREAK;
return;
}
switch (priv->key) {
case NFT_CT_DIRECTION:
nft_reg_store8(dest, CTINFO2DIR(ctinfo));
return;
case NFT_CT_STATUS:
*dest = ct->status;
return;
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
*dest = READ_ONCE(ct->mark);
return;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
case NFT_CT_SECMARK:
*dest = ct->secmark;
return;
#endif
default:
WARN_ON_ONCE(1);
regs->verdict.code = NFT_BREAK;
break;
}
}
EXPORT_SYMBOL_GPL(nft_ct_get_fast_eval);
#endif
| linux-master | net/netfilter/nft_ct_fast.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_bpf_link.h>
#include <uapi/linux/netfilter_ipv4.h>
static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb,
const struct nf_hook_state *s)
{
const struct bpf_prog *prog = bpf_prog;
struct bpf_nf_ctx ctx = {
.state = s,
.skb = skb,
};
return bpf_prog_run(prog, &ctx);
}
struct bpf_nf_link {
struct bpf_link link;
struct nf_hook_ops hook_ops;
struct net *net;
u32 dead;
const struct nf_defrag_hook *defrag_hook;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
static const struct nf_defrag_hook *
get_proto_defrag_hook(struct bpf_nf_link *link,
const struct nf_defrag_hook __rcu *global_hook,
const char *mod)
{
const struct nf_defrag_hook *hook;
int err;
/* RCU protects us from races against module unloading */
rcu_read_lock();
hook = rcu_dereference(global_hook);
if (!hook) {
rcu_read_unlock();
err = request_module(mod);
if (err)
return ERR_PTR(err < 0 ? err : -EINVAL);
rcu_read_lock();
hook = rcu_dereference(global_hook);
}
if (hook && try_module_get(hook->owner)) {
/* Once we have a refcnt on the module, we no longer need RCU */
hook = rcu_pointer_handoff(hook);
} else {
WARN_ONCE(!hook, "%s has bad registration", mod);
hook = ERR_PTR(-ENOENT);
}
rcu_read_unlock();
if (!IS_ERR(hook)) {
err = hook->enable(link->net);
if (err) {
module_put(hook->owner);
hook = ERR_PTR(err);
}
}
return hook;
}
#endif
static int bpf_nf_enable_defrag(struct bpf_nf_link *link)
{
const struct nf_defrag_hook __maybe_unused *hook;
switch (link->hook_ops.pf) {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
case NFPROTO_IPV4:
hook = get_proto_defrag_hook(link, nf_defrag_v4_hook, "nf_defrag_ipv4");
if (IS_ERR(hook))
return PTR_ERR(hook);
link->defrag_hook = hook;
return 0;
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
case NFPROTO_IPV6:
hook = get_proto_defrag_hook(link, nf_defrag_v6_hook, "nf_defrag_ipv6");
if (IS_ERR(hook))
return PTR_ERR(hook);
link->defrag_hook = hook;
return 0;
#endif
default:
return -EAFNOSUPPORT;
}
}
static void bpf_nf_disable_defrag(struct bpf_nf_link *link)
{
const struct nf_defrag_hook *hook = link->defrag_hook;
if (!hook)
return;
hook->disable(link->net);
module_put(hook->owner);
}
static void bpf_nf_link_release(struct bpf_link *link)
{
struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);
if (nf_link->dead)
return;
/* do not double release in case .detach was already called */
if (!cmpxchg(&nf_link->dead, 0, 1)) {
nf_unregister_net_hook(nf_link->net, &nf_link->hook_ops);
bpf_nf_disable_defrag(nf_link);
}
}
static void bpf_nf_link_dealloc(struct bpf_link *link)
{
struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);
kfree(nf_link);
}
static int bpf_nf_link_detach(struct bpf_link *link)
{
bpf_nf_link_release(link);
return 0;
}
static void bpf_nf_link_show_info(const struct bpf_link *link,
struct seq_file *seq)
{
struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);
seq_printf(seq, "pf:\t%u\thooknum:\t%u\tprio:\t%d\n",
nf_link->hook_ops.pf, nf_link->hook_ops.hooknum,
nf_link->hook_ops.priority);
}
static int bpf_nf_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{
struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);
info->netfilter.pf = nf_link->hook_ops.pf;
info->netfilter.hooknum = nf_link->hook_ops.hooknum;
info->netfilter.priority = nf_link->hook_ops.priority;
info->netfilter.flags = 0;
return 0;
}
static int bpf_nf_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog)
{
return -EOPNOTSUPP;
}
static const struct bpf_link_ops bpf_nf_link_lops = {
.release = bpf_nf_link_release,
.dealloc = bpf_nf_link_dealloc,
.detach = bpf_nf_link_detach,
.show_fdinfo = bpf_nf_link_show_info,
.fill_link_info = bpf_nf_link_fill_link_info,
.update_prog = bpf_nf_link_update,
};
static int bpf_nf_check_pf_and_hooks(const union bpf_attr *attr)
{
int prio;
switch (attr->link_create.netfilter.pf) {
case NFPROTO_IPV4:
case NFPROTO_IPV6:
if (attr->link_create.netfilter.hooknum >= NF_INET_NUMHOOKS)
return -EPROTO;
break;
default:
return -EAFNOSUPPORT;
}
if (attr->link_create.netfilter.flags & ~BPF_F_NETFILTER_IP_DEFRAG)
return -EOPNOTSUPP;
/* make sure conntrack confirm is always last */
prio = attr->link_create.netfilter.priority;
if (prio == NF_IP_PRI_FIRST)
return -ERANGE; /* sabotage_in and other warts */
else if (prio == NF_IP_PRI_LAST)
return -ERANGE; /* e.g. conntrack confirm */
else if ((attr->link_create.netfilter.flags & BPF_F_NETFILTER_IP_DEFRAG) &&
prio <= NF_IP_PRI_CONNTRACK_DEFRAG)
return -ERANGE; /* cannot use defrag if prog runs before nf_defrag */
return 0;
}
int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
struct net *net = current->nsproxy->net_ns;
struct bpf_link_primer link_primer;
struct bpf_nf_link *link;
int err;
if (attr->link_create.flags)
return -EINVAL;
err = bpf_nf_check_pf_and_hooks(attr);
if (err)
return err;
link = kzalloc(sizeof(*link), GFP_USER);
if (!link)
return -ENOMEM;
bpf_link_init(&link->link, BPF_LINK_TYPE_NETFILTER, &bpf_nf_link_lops, prog);
link->hook_ops.hook = nf_hook_run_bpf;
link->hook_ops.hook_ops_type = NF_HOOK_OP_BPF;
link->hook_ops.priv = prog;
link->hook_ops.pf = attr->link_create.netfilter.pf;
link->hook_ops.priority = attr->link_create.netfilter.priority;
link->hook_ops.hooknum = attr->link_create.netfilter.hooknum;
link->net = net;
link->dead = false;
link->defrag_hook = NULL;
err = bpf_link_prime(&link->link, &link_primer);
if (err) {
kfree(link);
return err;
}
if (attr->link_create.netfilter.flags & BPF_F_NETFILTER_IP_DEFRAG) {
err = bpf_nf_enable_defrag(link);
if (err) {
bpf_link_cleanup(&link_primer);
return err;
}
}
err = nf_register_net_hook(net, &link->hook_ops);
if (err) {
bpf_nf_disable_defrag(link);
bpf_link_cleanup(&link_primer);
return err;
}
return bpf_link_settle(&link_primer);
}
const struct bpf_prog_ops netfilter_prog_ops = {
.test_run = bpf_prog_test_run_nf,
};
static bool nf_ptr_to_btf_id(struct bpf_insn_access_aux *info, const char *name)
{
struct btf *btf;
s32 type_id;
btf = bpf_get_btf_vmlinux();
if (IS_ERR_OR_NULL(btf))
return false;
type_id = btf_find_by_name_kind(btf, name, BTF_KIND_STRUCT);
if (WARN_ON_ONCE(type_id < 0))
return false;
info->btf = btf;
info->btf_id = type_id;
info->reg_type = PTR_TO_BTF_ID | PTR_TRUSTED;
return true;
}
static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(struct bpf_nf_ctx))
return false;
if (type == BPF_WRITE)
return false;
switch (off) {
case bpf_ctx_range(struct bpf_nf_ctx, skb):
if (size != sizeof_field(struct bpf_nf_ctx, skb))
return false;
return nf_ptr_to_btf_id(info, "sk_buff");
case bpf_ctx_range(struct bpf_nf_ctx, state):
if (size != sizeof_field(struct bpf_nf_ctx, state))
return false;
return nf_ptr_to_btf_id(info, "nf_hook_state");
default:
return false;
}
}
static const struct bpf_func_proto *
bpf_nf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
return bpf_base_func_proto(func_id);
}
const struct bpf_verifier_ops netfilter_verifier_ops = {
.is_valid_access = nf_is_valid_access,
.get_func_proto = bpf_nf_func_proto,
};
| linux-master | net/netfilter/nf_bpf_link.c |
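As a usage sketch, this link type is what the BPF_NETFILTER attach type drives from userspace. The two fragments below show one plausible way to attach a trivial SEC("netfilter") program at the IPv4 LOCAL_IN hook; they assume libbpf >= 1.2 (which added the netfilter member of bpf_link_create_opts), hard-code the NFPROTO_IPV4/NF_INET_LOCAL_IN constant values, and pick an arbitrary priority, so treat them as an illustration rather than a canonical loader.

/* prog.bpf.c -- hypothetical minimal netfilter BPF program */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define NF_ACCEPT 1

SEC("netfilter")
int nf_allow(struct bpf_nf_ctx *ctx)
{
	/* ctx->skb and ctx->state arrive as trusted BTF pointers,
	 * courtesy of nf_is_valid_access() above */
	return NF_ACCEPT;
}

char _license[] SEC("license") = "GPL";

/* loader.c -- hypothetical userspace side */
#include <bpf/bpf.h>

int attach_nf_allow(int prog_fd)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts,
		    .netfilter.pf = 2,		/* NFPROTO_IPV4 */
		    .netfilter.hooknum = 1,	/* NF_INET_LOCAL_IN */
		    .netfilter.priority = -128);

	/* NF_IP_PRI_FIRST and NF_IP_PRI_LAST are rejected with -ERANGE
	 * by bpf_nf_check_pf_and_hooks(); anything in between works */
	return bpf_link_create(prog_fd, 0, BPF_NETFILTER, &opts);
}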
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2008-2009 Pablo Neira Ayuso <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/xt_cluster.h>
static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct)
{
return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
}
static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
{
return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
}
static inline u_int32_t
xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info)
{
return jhash_1word(ip, info->hash_seed);
}
static inline u_int32_t
xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info)
{
return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed);
}
static inline u_int32_t
xt_cluster_hash(const struct nf_conn *ct,
const struct xt_cluster_match_info *info)
{
u_int32_t hash = 0;
switch (nf_ct_l3num(ct)) {
case AF_INET:
hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info);
break;
case AF_INET6:
hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info);
break;
default:
WARN_ON(1);
break;
}
return reciprocal_scale(hash, info->total_nodes);
}
static inline bool
xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family)
{
bool is_multicast = false;
switch (family) {
case NFPROTO_IPV4:
is_multicast = ipv4_is_multicast(ip_hdr(skb)->daddr);
break;
case NFPROTO_IPV6:
is_multicast = ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr);
break;
default:
WARN_ON(1);
break;
}
return is_multicast;
}
static bool
xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct sk_buff *pskb = (struct sk_buff *)skb;
const struct xt_cluster_match_info *info = par->matchinfo;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned long hash;
/* This match assumes that all nodes see the same packets. This can be
 * achieved if the switch that connects the cluster nodes supports some
 * sort of 'port mirroring'. However, if your switch does not support
 * this, your cluster nodes can reply to ARP requests using a multicast
 * MAC address. Thus, your switch will flood the same packets to the
 * cluster nodes with the same multicast MAC address. Using a multicast
 * link address is a RFC 1812 (section 3.3.2) violation, but this works
 * fine in practice.
 *
 * Unfortunately, if you use the multicast MAC address, the link layer
 * sets skbuff's pkt_type to PACKET_MULTICAST, which is not accepted
 * by TCP and others for packets coming to this node. For that reason,
 * this match mangles skbuff's pkt_type if it detects a packet
 * addressed to a unicast address but using PACKET_MULTICAST. Yes, I
 * know, matches should not alter packets, but we are doing this here
 * because we would need to add a PKTTYPE target for this sole purpose.
 */
if (!xt_cluster_is_multicast_addr(skb, xt_family(par)) &&
skb->pkt_type == PACKET_MULTICAST) {
pskb->pkt_type = PACKET_HOST;
}
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return false;
if (ct->master)
hash = xt_cluster_hash(ct->master, info);
else
hash = xt_cluster_hash(ct, info);
return !!((1 << hash) & info->node_mask) ^
!!(info->flags & XT_CLUSTER_F_INV);
}
static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_cluster_match_info *info = par->matchinfo;
int ret;
if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
info->total_nodes, XT_CLUSTER_NODES_MAX);
return -EINVAL;
}
if (info->node_mask >= (1ULL << info->total_nodes)) {
pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
return -EDOM;
}
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static struct xt_match xt_cluster_match __read_mostly = {
.name = "cluster",
.family = NFPROTO_UNSPEC,
.match = xt_cluster_mt,
.checkentry = xt_cluster_mt_checkentry,
.matchsize = sizeof(struct xt_cluster_match_info),
.destroy = xt_cluster_mt_destroy,
.me = THIS_MODULE,
};
static int __init xt_cluster_mt_init(void)
{
return xt_register_match(&xt_cluster_match);
}
static void __exit xt_cluster_mt_fini(void)
{
xt_unregister_match(&xt_cluster_match);
}
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: hash-based cluster match");
MODULE_ALIAS("ipt_cluster");
MODULE_ALIAS("ip6t_cluster");
module_init(xt_cluster_mt_init);
module_exit(xt_cluster_mt_fini);
| linux-master | net/netfilter/xt_cluster.c |
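To make the node-selection arithmetic concrete, here is a small userspace sketch of the per-connection decision xt_cluster_mt() takes; the jhash computation is reduced to an opaque input, the XT_CLUSTER_F_INV inversion is omitted, and the values are illustrative. In iptables terms this is roughly what sits behind a rule such as `-m cluster --cluster-total-nodes 2 --cluster-local-node 1 --cluster-hash-seed 0xdeadbeef`.

/* Hypothetical sketch of the cluster node-selection logic. */
#include <stdbool.h>
#include <stdint.h>

/* Same trick as the kernel's reciprocal_scale(): map a 32-bit hash
 * uniformly onto [0, total_nodes) without a division. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

/* node_mask has bit N set when this machine is responsible for node
 * N + 1; a packet is ours when the scaled hash of the connection's
 * original source address lands on one of our bits. */
static bool cluster_owns_flow(uint32_t src_hash, uint32_t total_nodes,
			      uint32_t node_mask)
{
	uint32_t node = reciprocal_scale(src_hash, total_nodes);

	return (1u << node) & node_mask;
}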
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match connection tracking information. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2005 Netfilter Core Team <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_state.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <[email protected]>");
MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module");
MODULE_ALIAS("ipt_state");
MODULE_ALIAS("ip6t_state");
static bool
state_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_state_info *sinfo = par->matchinfo;
enum ip_conntrack_info ctinfo;
unsigned int statebit;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct)
statebit = XT_STATE_BIT(ctinfo);
else if (ctinfo == IP_CT_UNTRACKED)
statebit = XT_STATE_UNTRACKED;
else
statebit = XT_STATE_INVALID;
return (sinfo->statemask & statebit);
}
static int state_mt_check(const struct xt_mtchk_param *par)
{
int ret;
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
static void state_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static struct xt_match state_mt_reg __read_mostly = {
.name = "state",
.family = NFPROTO_UNSPEC,
.checkentry = state_mt_check,
.match = state_mt,
.destroy = state_mt_destroy,
.matchsize = sizeof(struct xt_state_info),
.me = THIS_MODULE,
};
static int __init state_mt_init(void)
{
return xt_register_match(&state_mt_reg);
}
static void __exit state_mt_exit(void)
{
xt_unregister_match(&state_mt_reg);
}
module_init(state_mt_init);
module_exit(state_mt_exit);
| linux-master | net/netfilter/xt_state.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Xtables module to match the process control group.
*
* Might be used to implement individual "per-application" firewall
* policies in contrast to global policies based on control groups.
* Matching is based upon processes tagged to net_cls' classid marker.
*
* (C) 2013 Daniel Borkmann <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_cgroup.h>
#include <net/sock.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <[email protected]>");
MODULE_DESCRIPTION("Xtables: process control group matching");
MODULE_ALIAS("ipt_cgroup");
MODULE_ALIAS("ip6t_cgroup");
static int cgroup_mt_check_v0(const struct xt_mtchk_param *par)
{
struct xt_cgroup_info_v0 *info = par->matchinfo;
if (info->invert & ~1)
return -EINVAL;
return 0;
}
static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
{
struct xt_cgroup_info_v1 *info = par->matchinfo;
struct cgroup *cgrp;
if ((info->invert_path & ~1) || (info->invert_classid & ~1))
return -EINVAL;
if (!info->has_path && !info->has_classid) {
pr_info("xt_cgroup: no path or classid specified\n");
return -EINVAL;
}
if (info->has_path && info->has_classid) {
pr_info_ratelimited("path and classid specified\n");
return -EINVAL;
}
info->priv = NULL;
if (info->has_path) {
cgrp = cgroup_get_from_path(info->path);
if (IS_ERR(cgrp)) {
pr_info_ratelimited("invalid path, errno=%ld\n",
PTR_ERR(cgrp));
return -EINVAL;
}
info->priv = cgrp;
}
return 0;
}
static int cgroup_mt_check_v2(const struct xt_mtchk_param *par)
{
struct xt_cgroup_info_v2 *info = par->matchinfo;
struct cgroup *cgrp;
if ((info->invert_path & ~1) || (info->invert_classid & ~1))
return -EINVAL;
if (!info->has_path && !info->has_classid) {
pr_info("xt_cgroup: no path or classid specified\n");
return -EINVAL;
}
if (info->has_path && info->has_classid) {
pr_info_ratelimited("path and classid specified\n");
return -EINVAL;
}
info->priv = NULL;
if (info->has_path) {
cgrp = cgroup_get_from_path(info->path);
if (IS_ERR(cgrp)) {
pr_info_ratelimited("invalid path, errno=%ld\n",
PTR_ERR(cgrp));
return -EINVAL;
}
info->priv = cgrp;
}
return 0;
}
static bool
cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_cgroup_info_v0 *info = par->matchinfo;
struct sock *sk = skb->sk;
if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
return false;
return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^
info->invert;
}
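/* Revisions 1 and 2: when a cgroup2 path was configured, the socket
 * matches if its cgroup equals the configured one or is any descendant
 * of it; otherwise fall back to comparing the net_cls classid.
 */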
static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_cgroup_info_v1 *info = par->matchinfo;
struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
struct cgroup *ancestor = info->priv;
struct sock *sk = skb->sk;
if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
return false;
if (ancestor)
return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^
info->invert_path;
else
return (info->classid == sock_cgroup_classid(skcd)) ^
info->invert_classid;
}
static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_cgroup_info_v2 *info = par->matchinfo;
struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
struct cgroup *ancestor = info->priv;
struct sock *sk = skb->sk;
if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
return false;
if (ancestor)
return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^
info->invert_path;
else
return (info->classid == sock_cgroup_classid(skcd)) ^
info->invert_classid;
}
static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
struct xt_cgroup_info_v1 *info = par->matchinfo;
if (info->priv)
cgroup_put(info->priv);
}
static void cgroup_mt_destroy_v2(const struct xt_mtdtor_param *par)
{
struct xt_cgroup_info_v2 *info = par->matchinfo;
if (info->priv)
cgroup_put(info->priv);
}
static struct xt_match cgroup_mt_reg[] __read_mostly = {
{
.name = "cgroup",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = cgroup_mt_check_v0,
.match = cgroup_mt_v0,
.matchsize = sizeof(struct xt_cgroup_info_v0),
.me = THIS_MODULE,
.hooks = (1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_IN),
},
{
.name = "cgroup",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = cgroup_mt_check_v1,
.match = cgroup_mt_v1,
.matchsize = sizeof(struct xt_cgroup_info_v1),
.usersize = offsetof(struct xt_cgroup_info_v1, priv),
.destroy = cgroup_mt_destroy_v1,
.me = THIS_MODULE,
.hooks = (1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_IN),
},
{
.name = "cgroup",
.revision = 2,
.family = NFPROTO_UNSPEC,
.checkentry = cgroup_mt_check_v2,
.match = cgroup_mt_v2,
.matchsize = sizeof(struct xt_cgroup_info_v2),
.usersize = offsetof(struct xt_cgroup_info_v2, priv),
.destroy = cgroup_mt_destroy_v2,
.me = THIS_MODULE,
.hooks = (1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_IN),
},
};
static int __init cgroup_mt_init(void)
{
return xt_register_matches(cgroup_mt_reg, ARRAY_SIZE(cgroup_mt_reg));
}
static void __exit cgroup_mt_exit(void)
{
xt_unregister_matches(cgroup_mt_reg, ARRAY_SIZE(cgroup_mt_reg));
}
module_init(cgroup_mt_init);
module_exit(cgroup_mt_exit);
| linux-master | net/netfilter/xt_cgroup.c |
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/tc_act/tc_csum.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
static struct workqueue_struct *nf_flow_offload_add_wq;
static struct workqueue_struct *nf_flow_offload_del_wq;
static struct workqueue_struct *nf_flow_offload_stats_wq;
struct flow_offload_work {
struct list_head list;
enum flow_cls_command cmd;
struct nf_flowtable *flowtable;
struct flow_offload *flow;
struct work_struct work;
};
#define NF_FLOW_DISSECTOR(__match, __type, __field) \
(__match)->dissector.offset[__type] = \
offsetof(struct nf_flow_key, __field)
static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
struct ip_tunnel_info *tun_info)
{
struct nf_flow_key *mask = &match->mask;
struct nf_flow_key *key = &match->key;
unsigned long long enc_keys;
if (!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX))
return;
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
key->enc_key_id.keyid = tunnel_id_to_key32(tun_info->key.tun_id);
mask->enc_key_id.keyid = 0xffffffff;
enc_keys = BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL);
if (ip_tunnel_info_af(tun_info) == AF_INET) {
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
enc_ipv4);
key->enc_ipv4.src = tun_info->key.u.ipv4.dst;
key->enc_ipv4.dst = tun_info->key.u.ipv4.src;
if (key->enc_ipv4.src)
mask->enc_ipv4.src = 0xffffffff;
if (key->enc_ipv4.dst)
mask->enc_ipv4.dst = 0xffffffff;
enc_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
} else {
memcpy(&key->enc_ipv6.src, &tun_info->key.u.ipv6.dst,
sizeof(struct in6_addr));
memcpy(&key->enc_ipv6.dst, &tun_info->key.u.ipv6.src,
sizeof(struct in6_addr));
if (memcmp(&key->enc_ipv6.src, &in6addr_any,
sizeof(struct in6_addr)))
memset(&mask->enc_ipv6.src, 0xff,
sizeof(struct in6_addr));
if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
sizeof(struct in6_addr)))
memset(&mask->enc_ipv6.dst, 0xff,
sizeof(struct in6_addr));
enc_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
match->dissector.used_keys |= enc_keys;
}
static void nf_flow_rule_vlan_match(struct flow_dissector_key_vlan *key,
struct flow_dissector_key_vlan *mask,
u16 vlan_id, __be16 proto)
{
key->vlan_id = vlan_id;
mask->vlan_id = VLAN_VID_MASK;
key->vlan_tpid = proto;
mask->vlan_tpid = 0xffff;
}
static int nf_flow_rule_match(struct nf_flow_match *match,
const struct flow_offload_tuple *tuple,
struct dst_entry *other_dst)
{
struct nf_flow_key *mask = &match->mask;
struct nf_flow_key *key = &match->key;
struct ip_tunnel_info *tun_info;
bool vlan_encap = false;
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
if (other_dst && other_dst->lwtstate) {
tun_info = lwt_tun_info(other_dst->lwtstate);
nf_flow_rule_lwt_match(match, tun_info);
}
if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_TC)
key->meta.ingress_ifindex = tuple->tc.iifidx;
else
key->meta.ingress_ifindex = tuple->iifidx;
mask->meta.ingress_ifindex = 0xffffffff;
if (tuple->encap_num > 0 && !(tuple->in_vlan_ingress & BIT(0)) &&
tuple->encap[0].proto == htons(ETH_P_8021Q)) {
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_VLAN, vlan);
nf_flow_rule_vlan_match(&key->vlan, &mask->vlan,
tuple->encap[0].id,
tuple->encap[0].proto);
vlan_encap = true;
}
if (tuple->encap_num > 1 && !(tuple->in_vlan_ingress & BIT(1)) &&
tuple->encap[1].proto == htons(ETH_P_8021Q)) {
if (vlan_encap) {
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CVLAN,
cvlan);
nf_flow_rule_vlan_match(&key->cvlan, &mask->cvlan,
tuple->encap[1].id,
tuple->encap[1].proto);
} else {
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_VLAN,
vlan);
nf_flow_rule_vlan_match(&key->vlan, &mask->vlan,
tuple->encap[1].id,
tuple->encap[1].proto);
}
}
switch (tuple->l3proto) {
case AF_INET:
key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
key->basic.n_proto = htons(ETH_P_IP);
key->ipv4.src = tuple->src_v4.s_addr;
mask->ipv4.src = 0xffffffff;
key->ipv4.dst = tuple->dst_v4.s_addr;
mask->ipv4.dst = 0xffffffff;
break;
case AF_INET6:
key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
key->basic.n_proto = htons(ETH_P_IPV6);
key->ipv6.src = tuple->src_v6;
memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
key->ipv6.dst = tuple->dst_v6;
memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
break;
default:
return -EOPNOTSUPP;
}
mask->control.addr_type = 0xffff;
match->dissector.used_keys |= BIT_ULL(key->control.addr_type);
mask->basic.n_proto = 0xffff;
switch (tuple->l4proto) {
case IPPROTO_TCP:
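/* only match flows while no RST or FIN has been seen: the key is
 * all-zero and the mask covers exactly those two flag bits
 */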
key->tcp.flags = 0;
mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_TCP);
break;
case IPPROTO_UDP:
case IPPROTO_GRE:
break;
default:
return -EOPNOTSUPP;
}
key->basic.ip_proto = tuple->l4proto;
mask->basic.ip_proto = 0xff;
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_META) |
BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);
switch (tuple->l4proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
key->tp.src = tuple->src_port;
mask->tp.src = 0xffff;
key->tp.dst = tuple->dst_port;
mask->tp.dst = 0xffff;
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_PORTS);
break;
}
return 0;
}
static void flow_offload_mangle(struct flow_action_entry *entry,
enum flow_action_mangle_base htype, u32 offset,
const __be32 *value, const __be32 *mask)
{
entry->id = FLOW_ACTION_MANGLE;
entry->mangle.htype = htype;
entry->mangle.offset = offset;
memcpy(&entry->mangle.mask, mask, sizeof(u32));
memcpy(&entry->mangle.val, value, sizeof(u32));
}
static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
int i = flow_rule->rule->action.num_entries++;
return &flow_rule->rule->action.entries[i];
}
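/* The 6-byte MACs straddle 4-byte boundaries, so each Ethernet address
 * rewrite is expressed as two 32-bit mangle actions: the destination
 * MAC covers offsets 0-5 (a full word at 0 plus the low half-word at
 * 4), the source MAC covers offsets 6-11 (the high half-word at 4 plus
 * a full word at 8). The masks protect whatever shares the partially
 * written words.
 */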
static int flow_offload_eth_src(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
const struct flow_offload_tuple *other_tuple, *this_tuple;
struct net_device *dev = NULL;
const unsigned char *addr;
u32 mask, val;
u16 val16;
this_tuple = &flow->tuplehash[dir].tuple;
switch (this_tuple->xmit_type) {
case FLOW_OFFLOAD_XMIT_DIRECT:
addr = this_tuple->out.h_source;
break;
case FLOW_OFFLOAD_XMIT_NEIGH:
other_tuple = &flow->tuplehash[!dir].tuple;
dev = dev_get_by_index(net, other_tuple->iifidx);
if (!dev)
return -ENOENT;
addr = dev->dev_addr;
break;
default:
return -EOPNOTSUPP;
}
mask = ~0xffff0000;
memcpy(&val16, addr, 2);
val = val16 << 16;
flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
&val, &mask);
mask = ~0xffffffff;
memcpy(&val, addr + 2, 4);
flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
&val, &mask);
dev_put(dev);
return 0;
}
static int flow_offload_eth_dst(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
const struct flow_offload_tuple *other_tuple, *this_tuple;
const struct dst_entry *dst_cache;
unsigned char ha[ETH_ALEN];
struct neighbour *n;
const void *daddr;
u32 mask, val;
u8 nud_state;
u16 val16;
this_tuple = &flow->tuplehash[dir].tuple;
switch (this_tuple->xmit_type) {
case FLOW_OFFLOAD_XMIT_DIRECT:
ether_addr_copy(ha, this_tuple->out.h_dest);
break;
case FLOW_OFFLOAD_XMIT_NEIGH:
other_tuple = &flow->tuplehash[!dir].tuple;
daddr = &other_tuple->src_v4;
dst_cache = this_tuple->dst_cache;
n = dst_neigh_lookup(dst_cache, daddr);
if (!n)
return -ENOENT;
read_lock_bh(&n->lock);
nud_state = n->nud_state;
ether_addr_copy(ha, n->ha);
read_unlock_bh(&n->lock);
neigh_release(n);
if (!(nud_state & NUD_VALID))
return -ENOENT;
break;
default:
return -EOPNOTSUPP;
}
mask = ~0xffffffff;
memcpy(&val, ha, 4);
flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
&val, &mask);
mask = ~0x0000ffff;
memcpy(&val16, ha + 4, 2);
val = val16;
flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
&val, &mask);
return 0;
}
static void flow_offload_ipv4_snat(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffffffff);
__be32 addr;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
offset = offsetof(struct iphdr, saddr);
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
offset = offsetof(struct iphdr, daddr);
break;
default:
return;
}
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
&addr, &mask);
}
static void flow_offload_ipv4_dnat(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffffffff);
__be32 addr;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
offset = offsetof(struct iphdr, daddr);
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
offset = offsetof(struct iphdr, saddr);
break;
default:
return;
}
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
&addr, &mask);
}
static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
unsigned int offset,
const __be32 *addr, const __be32 *mask)
{
struct flow_action_entry *entry;
int i;
for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
entry = flow_action_entry_next(flow_rule);
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
offset + i * sizeof(u32), &addr[i], mask);
}
}
static void flow_offload_ipv6_snat(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
u32 mask = ~htonl(0xffffffff);
const __be32 *addr;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
offset = offsetof(struct ipv6hdr, saddr);
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
offset = offsetof(struct ipv6hdr, daddr);
break;
default:
return;
}
flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}
static void flow_offload_ipv6_dnat(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
u32 mask = ~htonl(0xffffffff);
const __be32 *addr;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
offset = offsetof(struct ipv6hdr, daddr);
break;
case FLOW_OFFLOAD_DIR_REPLY:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
offset = offsetof(struct ipv6hdr, saddr);
break;
default:
return;
}
flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}
static int flow_offload_l4proto(const struct flow_offload *flow)
{
u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
u8 type = 0;
switch (protonum) {
case IPPROTO_TCP:
type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
break;
case IPPROTO_UDP:
type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
break;
default:
break;
}
return type;
}
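/* Layer 4 ports are rewritten as a single 32-bit mangle at offset 0 of
 * the transport header: the source port sits in the first 16 bits, the
 * destination port in the last 16, and the mask keeps the port that is
 * not being translated intact.
 */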
static void flow_offload_port_snat(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask, port;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
offset = 0; /* offsetof(struct tcphdr, source); */
port = htonl(port << 16);
mask = ~htonl(0xffff0000);
break;
case FLOW_OFFLOAD_DIR_REPLY:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
offset = 0; /* offsetof(struct tcphdr, dest); */
port = htonl(port);
mask = ~htonl(0xffff);
break;
default:
return;
}
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
&port, &mask);
}
static void flow_offload_port_dnat(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask, port;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
offset = 0; /* offsetof(struct tcphdr, dest); */
port = htonl(port);
mask = ~htonl(0xffff);
break;
case FLOW_OFFLOAD_DIR_REPLY:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
offset = 0; /* offsetof(struct tcphdr, source); */
port = htonl(port << 16);
mask = ~htonl(0xffff0000);
break;
default:
return;
}
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
&port, &mask);
}
static void flow_offload_ipv4_checksum(struct net *net,
const struct flow_offload *flow,
struct nf_flow_rule *flow_rule)
{
u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
entry->id = FLOW_ACTION_CSUM;
entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;
switch (protonum) {
case IPPROTO_TCP:
entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
break;
case IPPROTO_UDP:
entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
break;
}
}
static void flow_offload_redirect(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
const struct flow_offload_tuple *this_tuple, *other_tuple;
struct flow_action_entry *entry;
struct net_device *dev;
int ifindex;
this_tuple = &flow->tuplehash[dir].tuple;
switch (this_tuple->xmit_type) {
case FLOW_OFFLOAD_XMIT_DIRECT:
this_tuple = &flow->tuplehash[dir].tuple;
ifindex = this_tuple->out.hw_ifidx;
break;
case FLOW_OFFLOAD_XMIT_NEIGH:
other_tuple = &flow->tuplehash[!dir].tuple;
ifindex = other_tuple->iifidx;
break;
default:
return;
}
dev = dev_get_by_index(net, ifindex);
if (!dev)
return;
entry = flow_action_entry_next(flow_rule);
entry->id = FLOW_ACTION_REDIRECT;
entry->dev = dev;
}
static void flow_offload_encap_tunnel(const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
const struct flow_offload_tuple *this_tuple;
struct flow_action_entry *entry;
struct dst_entry *dst;
this_tuple = &flow->tuplehash[dir].tuple;
if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
return;
dst = this_tuple->dst_cache;
if (dst && dst->lwtstate) {
struct ip_tunnel_info *tun_info;
tun_info = lwt_tun_info(dst->lwtstate);
if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
entry = flow_action_entry_next(flow_rule);
entry->id = FLOW_ACTION_TUNNEL_ENCAP;
entry->tunnel = tun_info;
}
}
}
static void flow_offload_decap_tunnel(const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
const struct flow_offload_tuple *other_tuple;
struct flow_action_entry *entry;
struct dst_entry *dst;
other_tuple = &flow->tuplehash[!dir].tuple;
if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
return;
dst = other_tuple->dst_cache;
if (dst && dst->lwtstate) {
struct ip_tunnel_info *tun_info;
tun_info = lwt_tun_info(dst->lwtstate);
if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
entry = flow_action_entry_next(flow_rule);
entry->id = FLOW_ACTION_TUNNEL_DECAP;
}
}
}
static int
nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
const struct flow_offload_tuple *other_tuple;
const struct flow_offload_tuple *tuple;
int i;
flow_offload_decap_tunnel(flow, dir, flow_rule);
flow_offload_encap_tunnel(flow, dir, flow_rule);
if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
return -1;
tuple = &flow->tuplehash[dir].tuple;
for (i = 0; i < tuple->encap_num; i++) {
struct flow_action_entry *entry;
if (tuple->in_vlan_ingress & BIT(i))
continue;
if (tuple->encap[i].proto == htons(ETH_P_8021Q)) {
entry = flow_action_entry_next(flow_rule);
entry->id = FLOW_ACTION_VLAN_POP;
}
}
other_tuple = &flow->tuplehash[!dir].tuple;
for (i = 0; i < other_tuple->encap_num; i++) {
struct flow_action_entry *entry;
if (other_tuple->in_vlan_ingress & BIT(i))
continue;
entry = flow_action_entry_next(flow_rule);
switch (other_tuple->encap[i].proto) {
case htons(ETH_P_PPP_SES):
entry->id = FLOW_ACTION_PPPOE_PUSH;
entry->pppoe.sid = other_tuple->encap[i].id;
break;
case htons(ETH_P_8021Q):
entry->id = FLOW_ACTION_VLAN_PUSH;
entry->vlan.vid = other_tuple->encap[i].id;
entry->vlan.proto = other_tuple->encap[i].proto;
break;
}
}
return 0;
}
int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
return -1;
if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
flow_offload_ipv4_snat(net, flow, dir, flow_rule);
flow_offload_port_snat(net, flow, dir, flow_rule);
}
if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
flow_offload_port_dnat(net, flow, dir, flow_rule);
}
if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
test_bit(NF_FLOW_DNAT, &flow->flags))
flow_offload_ipv4_checksum(net, flow, flow_rule);
flow_offload_redirect(net, flow, dir, flow_rule);
return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);
int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
return -1;
if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
flow_offload_ipv6_snat(net, flow, dir, flow_rule);
flow_offload_port_snat(net, flow, dir, flow_rule);
}
if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
flow_offload_port_dnat(net, flow, dir, flow_rule);
}
flow_offload_redirect(net, flow, dir, flow_rule);
return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);
#define NF_FLOW_RULE_ACTION_MAX 16
static struct nf_flow_rule *
nf_flow_offload_rule_alloc(struct net *net,
const struct flow_offload_work *offload,
enum flow_offload_tuple_dir dir)
{
const struct nf_flowtable *flowtable = offload->flowtable;
const struct flow_offload_tuple *tuple, *other_tuple;
struct flow_offload *flow = offload->flow;
struct dst_entry *other_dst = NULL;
struct nf_flow_rule *flow_rule;
int err = -ENOMEM;
flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
if (!flow_rule)
goto err_flow;
flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
if (!flow_rule->rule)
goto err_flow_rule;
flow_rule->rule->match.dissector = &flow_rule->match.dissector;
flow_rule->rule->match.mask = &flow_rule->match.mask;
flow_rule->rule->match.key = &flow_rule->match.key;
tuple = &flow->tuplehash[dir].tuple;
other_tuple = &flow->tuplehash[!dir].tuple;
if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
other_dst = other_tuple->dst_cache;
err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
if (err < 0)
goto err_flow_match;
flow_rule->rule->action.num_entries = 0;
if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
goto err_flow_match;
return flow_rule;
err_flow_match:
kfree(flow_rule->rule);
err_flow_rule:
kfree(flow_rule);
err_flow:
return NULL;
}
static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry;
int i;
for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
entry = &flow_rule->rule->action.entries[i];
if (entry->id != FLOW_ACTION_REDIRECT)
continue;
dev_put(entry->dev);
}
kfree(flow_rule->rule);
kfree(flow_rule);
}
static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
{
int i;
for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
__nf_flow_offload_destroy(flow_rule[i]);
}
static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
struct nf_flow_rule *flow_rule[])
{
struct net *net = read_pnet(&offload->flowtable->net);
flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
FLOW_OFFLOAD_DIR_ORIGINAL);
if (!flow_rule[0])
return -ENOMEM;
flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
FLOW_OFFLOAD_DIR_REPLY);
if (!flow_rule[1]) {
__nf_flow_offload_destroy(flow_rule[0]);
return -ENOMEM;
}
return 0;
}
static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
__be16 proto, int priority,
enum flow_cls_command cmd,
const struct flow_offload_tuple *tuple,
struct netlink_ext_ack *extack)
{
cls_flow->common.protocol = proto;
cls_flow->common.prio = priority;
cls_flow->common.extack = extack;
cls_flow->command = cmd;
cls_flow->cookie = (unsigned long)tuple;
}
static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
struct flow_offload *flow,
struct nf_flow_rule *flow_rule,
enum flow_offload_tuple_dir dir,
int priority, int cmd,
struct flow_stats *stats,
struct list_head *block_cb_list)
{
struct flow_cls_offload cls_flow = {};
struct flow_block_cb *block_cb;
struct netlink_ext_ack extack;
__be16 proto = ETH_P_ALL;
int err, i = 0;
nf_flow_offload_init(&cls_flow, proto, priority, cmd,
&flow->tuplehash[dir].tuple, &extack);
if (cmd == FLOW_CLS_REPLACE)
cls_flow.rule = flow_rule->rule;
down_read(&flowtable->flow_block_lock);
list_for_each_entry(block_cb, block_cb_list, list) {
err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
block_cb->cb_priv);
if (err < 0)
continue;
i++;
}
up_read(&flowtable->flow_block_lock);
if (cmd == FLOW_CLS_STATS)
memcpy(stats, &cls_flow.stats, sizeof(*stats));
return i;
}
static int flow_offload_tuple_add(struct flow_offload_work *offload,
struct nf_flow_rule *flow_rule,
enum flow_offload_tuple_dir dir)
{
return nf_flow_offload_tuple(offload->flowtable, offload->flow,
flow_rule, dir,
offload->flowtable->priority,
FLOW_CLS_REPLACE, NULL,
&offload->flowtable->flow_block.cb_list);
}
static void flow_offload_tuple_del(struct flow_offload_work *offload,
enum flow_offload_tuple_dir dir)
{
nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
offload->flowtable->priority,
FLOW_CLS_DESTROY, NULL,
&offload->flowtable->flow_block.cb_list);
}
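/* The offload counts as installed if at least one block callback
 * accepted a rule; the reply direction is only pushed when the flow
 * was flagged for bidirectional hardware offload.
 */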
static int flow_offload_rule_add(struct flow_offload_work *offload,
struct nf_flow_rule *flow_rule[])
{
int ok_count = 0;
ok_count += flow_offload_tuple_add(offload, flow_rule[0],
FLOW_OFFLOAD_DIR_ORIGINAL);
if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
ok_count += flow_offload_tuple_add(offload, flow_rule[1],
FLOW_OFFLOAD_DIR_REPLY);
if (ok_count == 0)
return -ENOENT;
return 0;
}
static void flow_offload_work_add(struct flow_offload_work *offload)
{
struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
int err;
err = nf_flow_offload_alloc(offload, flow_rule);
if (err < 0)
return;
err = flow_offload_rule_add(offload, flow_rule);
if (err < 0)
goto out;
set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
out:
nf_flow_offload_destroy(flow_rule);
}
static void flow_offload_work_del(struct flow_offload_work *offload)
{
clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}
static void flow_offload_tuple_stats(struct flow_offload_work *offload,
enum flow_offload_tuple_dir dir,
struct flow_stats *stats)
{
nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
offload->flowtable->priority,
FLOW_CLS_STATS, stats,
&offload->flowtable->flow_block.cb_list);
}
static void flow_offload_work_stats(struct flow_offload_work *offload)
{
struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
u64 lastused;
flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY,
&stats[1]);
lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
offload->flow->timeout = max_t(u64, offload->flow->timeout,
lastused + flow_offload_get_timeout(offload->flow));
if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
if (stats[0].pkts)
nf_ct_acct_add(offload->flow->ct,
FLOW_OFFLOAD_DIR_ORIGINAL,
stats[0].pkts, stats[0].bytes);
if (stats[1].pkts)
nf_ct_acct_add(offload->flow->ct,
FLOW_OFFLOAD_DIR_REPLY,
stats[1].pkts, stats[1].bytes);
}
}
static void flow_offload_work_handler(struct work_struct *work)
{
struct flow_offload_work *offload;
struct net *net;
offload = container_of(work, struct flow_offload_work, work);
net = read_pnet(&offload->flowtable->net);
switch (offload->cmd) {
case FLOW_CLS_REPLACE:
flow_offload_work_add(offload);
NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_add);
break;
case FLOW_CLS_DESTROY:
flow_offload_work_del(offload);
NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_del);
break;
case FLOW_CLS_STATS:
flow_offload_work_stats(offload);
NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_stats);
break;
default:
WARN_ON_ONCE(1);
}
clear_bit(NF_FLOW_HW_PENDING, &offload->flow->flags);
kfree(offload);
}
static void flow_offload_queue_work(struct flow_offload_work *offload)
{
struct net *net = read_pnet(&offload->flowtable->net);
if (offload->cmd == FLOW_CLS_REPLACE) {
NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_add);
queue_work(nf_flow_offload_add_wq, &offload->work);
} else if (offload->cmd == FLOW_CLS_DESTROY) {
NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_del);
queue_work(nf_flow_offload_del_wq, &offload->work);
} else {
NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_stats);
queue_work(nf_flow_offload_stats_wq, &offload->work);
}
}
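/* At most one offload work item may be in flight per flow:
 * NF_FLOW_HW_PENDING serialises add/del/stats requests and is cleared
 * again by flow_offload_work_handler().
 */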
static struct flow_offload_work *
nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
struct flow_offload *flow, unsigned int cmd)
{
struct flow_offload_work *offload;
if (test_and_set_bit(NF_FLOW_HW_PENDING, &flow->flags))
return NULL;
offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
if (!offload) {
clear_bit(NF_FLOW_HW_PENDING, &flow->flags);
return NULL;
}
offload->cmd = cmd;
offload->flow = flow;
offload->flowtable = flowtable;
INIT_WORK(&offload->work, flow_offload_work_handler);
return offload;
}
void nf_flow_offload_add(struct nf_flowtable *flowtable,
struct flow_offload *flow)
{
struct flow_offload_work *offload;
offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
if (!offload)
return;
flow_offload_queue_work(offload);
}
void nf_flow_offload_del(struct nf_flowtable *flowtable,
struct flow_offload *flow)
{
struct flow_offload_work *offload;
offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
if (!offload)
return;
set_bit(NF_FLOW_HW_DYING, &flow->flags);
flow_offload_queue_work(offload);
}
void nf_flow_offload_stats(struct nf_flowtable *flowtable,
struct flow_offload *flow)
{
struct flow_offload_work *offload;
__s32 delta;
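/* polling hardware counters is costly: skip the refresh while more
 * than 90% of the flow timeout is still remaining
 */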
delta = nf_flow_timeout_delta(flow->timeout);
if (delta >= (9 * flow_offload_get_timeout(flow)) / 10)
return;
offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
if (!offload)
return;
flow_offload_queue_work(offload);
}
void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable)
{
if (nf_flowtable_hw_offload(flowtable)) {
flush_workqueue(nf_flow_offload_del_wq);
nf_flow_table_gc_run(flowtable);
}
}
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
if (nf_flowtable_hw_offload(flowtable)) {
flush_workqueue(nf_flow_offload_add_wq);
flush_workqueue(nf_flow_offload_del_wq);
flush_workqueue(nf_flow_offload_stats_wq);
}
}
static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
struct flow_block_offload *bo,
enum flow_block_command cmd)
{
struct flow_block_cb *block_cb, *next;
int err = 0;
down_write(&flowtable->flow_block_lock);
switch (cmd) {
case FLOW_BLOCK_BIND:
list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
break;
case FLOW_BLOCK_UNBIND:
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
list_del(&block_cb->list);
flow_block_cb_free(block_cb);
}
break;
default:
WARN_ON_ONCE(1);
err = -EOPNOTSUPP;
}
up_write(&flowtable->flow_block_lock);
return err;
}
static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
struct net *net,
enum flow_block_command cmd,
struct nf_flowtable *flowtable,
struct netlink_ext_ack *extack)
{
memset(bo, 0, sizeof(*bo));
bo->net = net;
bo->block = &flowtable->flow_block;
bo->command = cmd;
bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo->extack = extack;
bo->cb_list_head = &flowtable->flow_block.cb_list;
INIT_LIST_HEAD(&bo->cb_list);
}
static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
{
struct nf_flowtable *flowtable = block_cb->indr.data;
struct net_device *dev = block_cb->indr.dev;
nf_flow_table_gc_cleanup(flowtable, dev);
down_write(&flowtable->flow_block_lock);
list_del(&block_cb->list);
list_del(&block_cb->driver_list);
flow_block_cb_free(block_cb);
up_write(&flowtable->flow_block_lock);
}
static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd,
struct netlink_ext_ack *extack)
{
nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
extack);
return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
nf_flow_table_indr_cleanup);
}
static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd,
struct netlink_ext_ack *extack)
{
int err;
nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
extack);
down_write(&flowtable->flow_block_lock);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
up_write(&flowtable->flow_block_lock);
if (err < 0)
return err;
return 0;
}
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
struct flow_block_offload bo;
int err;
if (!nf_flowtable_hw_offload(flowtable))
return 0;
if (dev->netdev_ops->ndo_setup_tc)
err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
&extack);
else
err = nf_flow_table_indr_offload_cmd(&bo, flowtable, dev, cmd,
&extack);
if (err < 0)
return err;
return nf_flow_table_block_setup(flowtable, &bo, cmd);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
int nf_flow_table_offload_init(void)
{
nf_flow_offload_add_wq = alloc_workqueue("nf_ft_offload_add",
WQ_UNBOUND | WQ_SYSFS, 0);
if (!nf_flow_offload_add_wq)
return -ENOMEM;
nf_flow_offload_del_wq = alloc_workqueue("nf_ft_offload_del",
WQ_UNBOUND | WQ_SYSFS, 0);
if (!nf_flow_offload_del_wq)
goto err_del_wq;
nf_flow_offload_stats_wq = alloc_workqueue("nf_ft_offload_stats",
WQ_UNBOUND | WQ_SYSFS, 0);
if (!nf_flow_offload_stats_wq)
goto err_stats_wq;
return 0;
err_stats_wq:
destroy_workqueue(nf_flow_offload_del_wq);
err_del_wq:
destroy_workqueue(nf_flow_offload_add_wq);
return -ENOMEM;
}
void nf_flow_table_offload_exit(void)
{
destroy_workqueue(nf_flow_offload_add_wq);
destroy_workqueue(nf_flow_offload_del_wq);
destroy_workqueue(nf_flow_offload_stats_wq);
}
| linux-master | net/netfilter/nf_flow_table_offload.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This is a module which is used for queueing packets and communicating with
* userspace via nfnetlink.
*
* (C) 2005 by Harald Welte <[email protected]>
* (C) 2007 by Patrick McHardy <[email protected]>
*
* Based on the old ipv4-only ip_queue.c:
* (C) 2000-2002 James Morris <[email protected]>
* (C) 2003-2005 Netfilter Core Team <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/list.h>
#include <linux/cgroup-defs.h>
#include <net/gso.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>
#include <linux/atomic.h>
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#define NFQNL_QMAX_DEFAULT 1024
/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
* includes the header length. Thus, the maximum packet length that we
* support is 65531 bytes. We send truncated packets if the specified length
* is larger than that. Userspace can check for presence of NFQA_CAP_LEN
* attribute to detect truncation.
*/
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
struct nfqnl_instance {
struct hlist_node hlist; /* global list of queues */
struct rcu_head rcu;
u32 peer_portid;
unsigned int queue_maxlen;
unsigned int copy_range;
unsigned int queue_dropped;
unsigned int queue_user_dropped;
u_int16_t queue_num; /* number of this queue */
u_int8_t copy_mode;
u_int32_t flags; /* Set using NFQA_CFG_FLAGS */
/*
* Following fields are dirtied for each queued packet,
* keep them in same cache line if possible.
*/
spinlock_t lock ____cacheline_aligned_in_smp;
unsigned int queue_total;
unsigned int id_sequence; /* 'sequence' of pkt ids */
struct list_head queue_list; /* packets in queue */
};
typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
static unsigned int nfnl_queue_net_id __read_mostly;
#define INSTANCE_BUCKETS 16
struct nfnl_queue_net {
spinlock_t instances_lock;
struct hlist_head instance_table[INSTANCE_BUCKETS];
};
static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
return net_generic(net, nfnl_queue_net_id);
}
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}
static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
struct hlist_head *head;
struct nfqnl_instance *inst;
head = &q->instance_table[instance_hashfn(queue_num)];
hlist_for_each_entry_rcu(inst, head, hlist) {
if (inst->queue_num == queue_num)
return inst;
}
return NULL;
}
static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
{
struct nfqnl_instance *inst;
unsigned int h;
int err;
spin_lock(&q->instances_lock);
if (instance_lookup(q, queue_num)) {
err = -EEXIST;
goto out_unlock;
}
inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
if (!inst) {
err = -ENOMEM;
goto out_unlock;
}
inst->queue_num = queue_num;
inst->peer_portid = portid;
inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
inst->copy_range = NFQNL_MAX_COPY_RANGE;
inst->copy_mode = NFQNL_COPY_NONE;
spin_lock_init(&inst->lock);
INIT_LIST_HEAD(&inst->queue_list);
if (!try_module_get(THIS_MODULE)) {
err = -EAGAIN;
goto out_free;
}
h = instance_hashfn(queue_num);
hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
spin_unlock(&q->instances_lock);
return inst;
out_free:
kfree(inst);
out_unlock:
spin_unlock(&q->instances_lock);
return ERR_PTR(err);
}
static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
unsigned long data);
static void
instance_destroy_rcu(struct rcu_head *head)
{
struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
rcu);
nfqnl_flush(inst, NULL, 0);
kfree(inst);
module_put(THIS_MODULE);
}
static void
__instance_destroy(struct nfqnl_instance *inst)
{
hlist_del_rcu(&inst->hlist);
call_rcu(&inst->rcu, instance_destroy_rcu);
}
static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
spin_lock(&q->instances_lock);
__instance_destroy(inst);
spin_unlock(&q->instances_lock);
}
static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
list_add_tail(&entry->list, &queue->queue_list);
queue->queue_total++;
}
static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
list_del(&entry->list);
queue->queue_total--;
}
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
struct nf_queue_entry *entry = NULL, *i;
spin_lock_bh(&queue->lock);
list_for_each_entry(i, &queue->queue_list, list) {
if (i->id == id) {
entry = i;
break;
}
}
if (entry)
__dequeue_entry(queue, entry);
spin_unlock_bh(&queue->lock);
return entry;
}
static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
const struct nf_ct_hook *ct_hook;
int err;
if (verdict == NF_ACCEPT ||
verdict == NF_REPEAT ||
verdict == NF_STOP) {
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
if (ct_hook) {
err = ct_hook->update(entry->state.net, entry->skb);
if (err < 0)
verdict = NF_DROP;
}
rcu_read_unlock();
}
nf_reinject(entry, verdict);
}
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
struct nf_queue_entry *entry, *next;
spin_lock_bh(&queue->lock);
list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
if (!cmpfn || cmpfn(entry, data)) {
list_del(&entry->list);
queue->queue_total--;
nfqnl_reinject(entry, NF_DROP);
}
}
spin_unlock_bh(&queue->lock);
}
static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
bool csum_verify)
{
__u32 flags = 0;
if (packet->ip_summed == CHECKSUM_PARTIAL)
flags = NFQA_SKB_CSUMNOTREADY;
else if (csum_verify)
flags = NFQA_SKB_CSUM_NOTVERIFIED;
if (skb_is_gso(packet))
flags |= NFQA_SKB_GSO;
return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}
static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
const struct cred *cred;
if (!sk_fullsock(sk))
return 0;
read_lock_bh(&sk->sk_callback_lock);
if (sk->sk_socket && sk->sk_socket->file) {
cred = sk->sk_socket->file->f_cred;
if (nla_put_be32(skb, NFQA_UID,
htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
goto nla_put_failure;
if (nla_put_be32(skb, NFQA_GID,
htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
goto nla_put_failure;
}
read_unlock_bh(&sk->sk_callback_lock);
return 0;
nla_put_failure:
read_unlock_bh(&sk->sk_callback_lock);
return -1;
}
static int nfqnl_put_sk_classid(struct sk_buff *skb, struct sock *sk)
{
#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
if (sk && sk_fullsock(sk)) {
u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
if (classid && nla_put_be32(skb, NFQA_CGROUP_CLASSID, htonl(classid)))
return -1;
}
#endif
return 0;
}
static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
{
u32 seclen = 0;
#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
if (!skb || !sk_fullsock(skb->sk))
return 0;
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->secmark)
security_secid_to_secctx(skb->secmark, secdata, &seclen);
read_unlock_bh(&skb->sk->sk_callback_lock);
#endif
return seclen;
}
static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
{
struct sk_buff *entskb = entry->skb;
u32 nlalen = 0;
if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
return 0;
if (skb_vlan_tag_present(entskb))
nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
nla_total_size(sizeof(__be16)));
if (entskb->network_header > entskb->mac_header)
nlalen += nla_total_size((entskb->network_header -
entskb->mac_header));
return nlalen;
}
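/* Sizing sketch (illustrative, assumes NLA_HDRLEN = 4 and 4-byte
 * attribute alignment; not part of the original source):
 * nla_total_size(sizeof(__be16)) = NLA_ALIGN(4 + 2) = 8, so the nested
 * NFQA_VLAN_TCI/NFQA_VLAN_PROTO pair needs 16 bytes, and the enclosing
 * NFQA_VLAN nest needs nla_total_size(16) = 20 bytes in total.
 */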
static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
{
struct sk_buff *entskb = entry->skb;
if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
return 0;
if (skb_vlan_tag_present(entskb)) {
struct nlattr *nest;
nest = nla_nest_start(skb, NFQA_VLAN);
if (!nest)
goto nla_put_failure;
if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
goto nla_put_failure;
nla_nest_end(skb, nest);
}
if (entskb->mac_header < entskb->network_header) {
int len = (int)(entskb->network_header - entskb->mac_header);
if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -1;
}
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct nf_queue_entry *entry,
__be32 **packet_id_ptr)
{
size_t size;
size_t data_len = 0, cap_len = 0;
unsigned int hlen = 0;
struct sk_buff *skb;
struct nlattr *nla;
struct nfqnl_msg_packet_hdr *pmsg;
struct nlmsghdr *nlh;
struct sk_buff *entskb = entry->skb;
struct net_device *indev;
struct net_device *outdev;
struct nf_conn *ct = NULL;
enum ip_conntrack_info ctinfo = 0;
const struct nfnl_ct_hook *nfnl_ct;
bool csum_verify;
char *secdata = NULL;
u32 seclen = 0;
ktime_t tstamp;
size = nlmsg_total_size(sizeof(struct nfgenmsg))
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#endif
+ nla_total_size(sizeof(u_int32_t)) /* mark */
+ nla_total_size(sizeof(u_int32_t)) /* priority */
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
+ nla_total_size(sizeof(u_int32_t)) /* skbinfo */
#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
+ nla_total_size(sizeof(u_int32_t)) /* classid */
#endif
+ nla_total_size(sizeof(u_int32_t)); /* cap_len */
tstamp = skb_tstamp_cond(entskb, false);
if (tstamp)
size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
size += nfqnl_get_bridge_size(entry);
if (entry->state.hook <= NF_INET_FORWARD ||
(entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
csum_verify = !skb_csum_unnecessary(entskb);
else
csum_verify = false;
outdev = entry->state.out;
switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
case NFQNL_COPY_META:
case NFQNL_COPY_NONE:
break;
case NFQNL_COPY_PACKET:
if (!(queue->flags & NFQA_CFG_F_GSO) &&
entskb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(entskb))
return NULL;
data_len = READ_ONCE(queue->copy_range);
if (data_len > entskb->len)
data_len = entskb->len;
hlen = skb_zerocopy_headlen(entskb);
hlen = min_t(unsigned int, hlen, data_len);
size += sizeof(struct nlattr) + hlen;
cap_len = entskb->len;
break;
}
nfnl_ct = rcu_dereference(nfnl_ct_hook);
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (queue->flags & NFQA_CFG_F_CONNTRACK) {
if (nfnl_ct != NULL) {
ct = nf_ct_get(entskb, &ctinfo);
if (ct != NULL)
size += nfnl_ct->build_size(ct);
}
}
#endif
if (queue->flags & NFQA_CFG_F_UID_GID) {
size += (nla_total_size(sizeof(u_int32_t)) /* uid */
+ nla_total_size(sizeof(u_int32_t))); /* gid */
}
if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
seclen = nfqnl_get_sk_secctx(entskb, &secdata);
if (seclen)
size += nla_total_size(seclen);
}
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb) {
skb_tx_error(entskb);
goto nlmsg_failure;
}
nlh = nfnl_msg_put(skb, 0, 0,
nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
0, entry->state.pf, NFNETLINK_V0,
htons(queue->queue_num));
if (!nlh) {
skb_tx_error(entskb);
kfree_skb(skb);
goto nlmsg_failure;
}
nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
pmsg = nla_data(nla);
pmsg->hw_protocol = entskb->protocol;
pmsg->hook = entry->state.hook;
*packet_id_ptr = &pmsg->packet_id;
indev = entry->state.in;
if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
goto nla_put_failure;
#else
if (entry->state.pf == PF_BRIDGE) {
/* Case 1: indev is physical input device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by __nf_queue */
nla_put_be32(skb, NFQA_IFINDEX_INDEV,
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
int physinif;
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
htonl(indev->ifindex)))
goto nla_put_failure;
physinif = nf_bridge_get_physinif(entskb);
if (physinif &&
nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
htonl(physinif)))
goto nla_put_failure;
}
#endif
}
if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
goto nla_put_failure;
#else
if (entry->state.pf == PF_BRIDGE) {
/* Case 1: outdev is physical output device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by __nf_queue */
nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
int physoutif;
/* Case 2: outdev is bridge group, we need to look for
* physical output device (when called from ipv4) */
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
physoutif = nf_bridge_get_physoutif(entskb);
if (physoutif &&
nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
htonl(physoutif)))
goto nla_put_failure;
}
#endif
}
if (entskb->mark &&
nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
goto nla_put_failure;
if (entskb->priority &&
nla_put_be32(skb, NFQA_PRIORITY, htonl(entskb->priority)))
goto nla_put_failure;
if (indev && entskb->dev &&
skb_mac_header_was_set(entskb) &&
skb_mac_header_len(entskb) != 0) {
struct nfqnl_msg_packet_hw phw;
int len;
memset(&phw, 0, sizeof(phw));
len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
phw.hw_addrlen = htons(len);
if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
goto nla_put_failure;
}
}
if (nfqnl_put_bridge(entry, skb) < 0)
goto nla_put_failure;
if (entry->state.hook <= NF_INET_FORWARD && tstamp) {
struct nfqnl_msg_packet_timestamp ts;
struct timespec64 kts = ktime_to_timespec64(tstamp);
ts.sec = cpu_to_be64(kts.tv_sec);
ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
goto nla_put_failure;
}
if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
goto nla_put_failure;
if (nfqnl_put_sk_classid(skb, entskb->sk) < 0)
goto nla_put_failure;
if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
goto nla_put_failure;
if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
goto nla_put_failure;
if (cap_len > data_len &&
nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
goto nla_put_failure;
if (nfqnl_put_packet_info(skb, entskb, csum_verify))
goto nla_put_failure;
if (data_len) {
struct nlattr *nla;
if (skb_tailroom(skb) < sizeof(*nla) + hlen)
goto nla_put_failure;
nla = skb_put(skb, sizeof(*nla));
nla->nla_type = NFQA_PAYLOAD;
nla->nla_len = nla_attr_size(data_len);
if (skb_zerocopy(skb, entskb, data_len, hlen))
goto nla_put_failure;
}
nlh->nlmsg_len = skb->len;
if (seclen)
security_release_secctx(secdata, seclen);
return skb;
nla_put_failure:
skb_tx_error(entskb);
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
nlmsg_failure:
if (seclen)
security_release_secctx(secdata, seclen);
return NULL;
}
static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
if (ct && ((ct->status & flags) == IPS_DYING))
return true;
#endif
return false;
}
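/* Illustrative note (not part of the original source): masking status
 * with IPS_CONFIRMED | IPS_DYING and comparing against IPS_DYING is
 * true only for entries that are dying but were never confirmed; a
 * status with both bits set yields the full mask rather than
 * IPS_DYING, so confirmed-but-dying entries are not dropped here.
 */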
static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
struct nf_queue_entry *entry)
{
struct sk_buff *nskb;
int err = -ENOBUFS;
__be32 *packet_id_ptr;
int failopen = 0;
nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
if (nskb == NULL) {
err = -ENOMEM;
goto err_out;
}
spin_lock_bh(&queue->lock);
if (nf_ct_drop_unconfirmed(entry))
goto err_out_free_nskb;
if (queue->queue_total >= queue->queue_maxlen) {
if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
failopen = 1;
err = 0;
} else {
queue->queue_dropped++;
net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
queue->queue_total);
}
goto err_out_free_nskb;
}
entry->id = ++queue->id_sequence;
*packet_id_ptr = htonl(entry->id);
/* nfnetlink_unicast will either free the nskb or add it to a socket */
err = nfnetlink_unicast(nskb, net, queue->peer_portid);
if (err < 0) {
if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
failopen = 1;
err = 0;
} else {
queue->queue_user_dropped++;
}
goto err_out_unlock;
}
__enqueue_entry(queue, entry);
spin_unlock_bh(&queue->lock);
return 0;
err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
spin_unlock_bh(&queue->lock);
if (failopen)
nfqnl_reinject(entry, NF_ACCEPT);
err_out:
return err;
}
static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
if (!entry)
return NULL;
if (nf_queue_entry_get_refs(entry))
return entry;
kfree(entry);
return NULL;
}
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
* before calling skb_gso_segment(). Else, original MAC header is lost
* and segmented skbs will be sent to wrong destination.
*/
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
if (nf_bridge_info_get(skb))
__skb_push(skb, skb->network_header - skb->mac_header);
}
static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
if (nf_bridge_info_get(skb))
__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif
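/* Illustrative note (not part of the original source): with bridge
 * netfilter info present, skb->network_header - skb->mac_header is the
 * L2 header length (typically 14 for plain Ethernet), so __skb_push()
 * rewinds skb->data to the MAC header before skb_gso_segment(), and
 * __skb_pull() restores the network-header view on each segment path.
 */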
static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
struct sk_buff *skb, struct nf_queue_entry *entry)
{
int ret = -ENOMEM;
struct nf_queue_entry *entry_seg;
nf_bridge_adjust_segmented_data(skb);
if (skb->next == NULL) { /* last packet, no need to copy entry */
struct sk_buff *gso_skb = entry->skb;
entry->skb = skb;
ret = __nfqnl_enqueue_packet(net, queue, entry);
if (ret)
entry->skb = gso_skb;
return ret;
}
skb_mark_not_on_list(skb);
entry_seg = nf_queue_entry_dup(entry);
if (entry_seg) {
entry_seg->skb = skb;
ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
if (ret)
nf_queue_entry_free(entry_seg);
}
return ret;
}
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
unsigned int queued;
struct nfqnl_instance *queue;
struct sk_buff *skb, *segs, *nskb;
int err = -ENOBUFS;
struct net *net = entry->state.net;
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
/* rcu_read_lock()ed by nf_hook_thresh */
queue = instance_lookup(q, queuenum);
if (!queue)
return -ESRCH;
if (queue->copy_mode == NFQNL_COPY_NONE)
return -EINVAL;
skb = entry->skb;
switch (entry->state.pf) {
case NFPROTO_IPV4:
skb->protocol = htons(ETH_P_IP);
break;
case NFPROTO_IPV6:
skb->protocol = htons(ETH_P_IPV6);
break;
}
if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
return __nfqnl_enqueue_packet(net, queue, entry);
nf_bridge_adjust_skb_data(skb);
segs = skb_gso_segment(skb, 0);
/* Does not use PTR_ERR to limit the number of error codes that can be
* returned by nf_queue. For instance, callers rely on -ESRCH to
* mean 'ignore this hook'.
*/
if (IS_ERR_OR_NULL(segs))
goto out_err;
queued = 0;
err = 0;
skb_list_walk_safe(segs, segs, nskb) {
if (err == 0)
err = __nfqnl_enqueue_packet_gso(net, queue,
segs, entry);
if (err == 0)
queued++;
else
kfree_skb(segs);
}
if (queued) {
if (err) /* some segments are already queued */
nf_queue_entry_free(entry);
kfree_skb(skb);
return 0;
}
out_err:
nf_bridge_adjust_segmented_data(skb);
return err;
}
static int
nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
{
struct sk_buff *nskb;
if (diff < 0) {
unsigned int min_len = skb_transport_offset(e->skb);
if (data_len < min_len)
return -EINVAL;
if (pskb_trim(e->skb, data_len))
return -ENOMEM;
} else if (diff > 0) {
if (data_len > 0xFFFF)
return -EINVAL;
if (diff > skb_tailroom(e->skb)) {
nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
diff, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
kfree_skb(e->skb);
e->skb = nskb;
}
skb_put(e->skb, diff);
}
if (skb_ensure_writable(e->skb, data_len))
return -ENOMEM;
skb_copy_to_linear_data(e->skb, data, data_len);
e->skb->ip_summed = CHECKSUM_NONE;
return 0;
}
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
unsigned char mode, unsigned int range)
{
int status = 0;
spin_lock_bh(&queue->lock);
switch (mode) {
case NFQNL_COPY_NONE:
case NFQNL_COPY_META:
queue->copy_mode = mode;
queue->copy_range = 0;
break;
case NFQNL_COPY_PACKET:
queue->copy_mode = mode;
if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
queue->copy_range = NFQNL_MAX_COPY_RANGE;
else
queue->copy_range = range;
break;
default:
status = -EINVAL;
}
spin_unlock_bh(&queue->lock);
return status;
}
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
int physinif, physoutif;
physinif = nf_bridge_get_physinif(entry->skb);
physoutif = nf_bridge_get_physoutif(entry->skb);
if (physinif == ifindex || physoutif == ifindex)
return 1;
#endif
if (entry->state.in)
if (entry->state.in->ifindex == ifindex)
return 1;
if (entry->state.out)
if (entry->state.out->ifindex == ifindex)
return 1;
return 0;
}
/* drop all packets with either indev or outdev == ifindex from all queue
* instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
int i;
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
rcu_read_lock();
for (i = 0; i < INSTANCE_BUCKETS; i++) {
struct nfqnl_instance *inst;
struct hlist_head *head = &q->instance_table[i];
hlist_for_each_entry_rcu(inst, head, hlist)
nfqnl_flush(inst, dev_cmp, ifindex);
}
rcu_read_unlock();
}
static int
nfqnl_rcv_dev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
/* Drop any packets associated with the downed device */
if (event == NETDEV_DOWN)
nfqnl_dev_drop(dev_net(dev), dev->ifindex);
return NOTIFY_DONE;
}
static struct notifier_block nfqnl_dev_notifier = {
.notifier_call = nfqnl_rcv_dev_event,
};
static void nfqnl_nf_hook_drop(struct net *net)
{
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
int i;
/* This function is also called on net namespace error unwind,
* when pernet_ops->init() failed and ->exit() functions of the
* previous pernet_ops gets called.
*
* This may result in a call to nfqnl_nf_hook_drop() before
* struct nfnl_queue_net was allocated.
*/
if (!q)
return;
for (i = 0; i < INSTANCE_BUCKETS; i++) {
struct nfqnl_instance *inst;
struct hlist_head *head = &q->instance_table[i];
hlist_for_each_entry_rcu(inst, head, hlist)
nfqnl_flush(inst, NULL, 0);
}
}
static int
nfqnl_rcv_nl_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
int i;
/* destroy all instances for this portid */
spin_lock(&q->instances_lock);
for (i = 0; i < INSTANCE_BUCKETS; i++) {
struct hlist_node *t2;
struct nfqnl_instance *inst;
struct hlist_head *head = &q->instance_table[i];
hlist_for_each_entry_safe(inst, t2, head, hlist) {
if (n->portid == inst->peer_portid)
__instance_destroy(inst);
}
}
spin_unlock(&q->instances_lock);
}
return NOTIFY_DONE;
}
static struct notifier_block nfqnl_rtnl_notifier = {
.notifier_call = nfqnl_rcv_nl_event,
};
static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
[NFQA_VLAN_TCI] = { .type = NLA_U16},
[NFQA_VLAN_PROTO] = { .type = NLA_U16},
};
static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
[NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
[NFQA_MARK] = { .type = NLA_U32 },
[NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
[NFQA_CT] = { .type = NLA_UNSPEC },
[NFQA_EXP] = { .type = NLA_UNSPEC },
[NFQA_VLAN] = { .type = NLA_NESTED },
[NFQA_PRIORITY] = { .type = NLA_U32 },
};
static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
[NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
[NFQA_MARK] = { .type = NLA_U32 },
[NFQA_PRIORITY] = { .type = NLA_U32 },
};
static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
{
struct nfqnl_instance *queue;
queue = instance_lookup(q, queue_num);
if (!queue)
return ERR_PTR(-ENODEV);
if (queue->peer_portid != nlportid)
return ERR_PTR(-EPERM);
return queue;
}
static struct nfqnl_msg_verdict_hdr *
verdicthdr_get(const struct nlattr * const nfqa[])
{
struct nfqnl_msg_verdict_hdr *vhdr;
unsigned int verdict;
if (!nfqa[NFQA_VERDICT_HDR])
return NULL;
vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
return NULL;
return vhdr;
}
static int nfq_id_after(unsigned int id, unsigned int max)
{
return (int)(id - max) > 0;
}
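/* Wraparound example (illustrative, not part of the original source):
 * for id = 3 and max = 0xfffffffe, id - max wraps to 5 and (int)5 > 0,
 * so id 3 correctly counts as "after" max across the 32-bit wrap;
 * conversely, id = 5 and max = 10 gives (int)0xfffffffb < 0, i.e.
 * false, as expected for an id at or below max.
 */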
static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nfqa[])
{
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
u16 queue_num = ntohs(info->nfmsg->res_id);
struct nf_queue_entry *entry, *tmp;
struct nfqnl_msg_verdict_hdr *vhdr;
struct nfqnl_instance *queue;
unsigned int verdict, maxid;
LIST_HEAD(batch_list);
queue = verdict_instance_lookup(q, queue_num,
NETLINK_CB(skb).portid);
if (IS_ERR(queue))
return PTR_ERR(queue);
vhdr = verdicthdr_get(nfqa);
if (!vhdr)
return -EINVAL;
verdict = ntohl(vhdr->verdict);
maxid = ntohl(vhdr->id);
spin_lock_bh(&queue->lock);
list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
if (nfq_id_after(entry->id, maxid))
break;
__dequeue_entry(queue, entry);
list_add_tail(&entry->list, &batch_list);
}
spin_unlock_bh(&queue->lock);
if (list_empty(&batch_list))
return -ENOENT;
list_for_each_entry_safe(entry, tmp, &batch_list, list) {
if (nfqa[NFQA_MARK])
entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
if (nfqa[NFQA_PRIORITY])
entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));
nfqnl_reinject(entry, verdict);
}
return 0;
}
static struct nf_conn *nfqnl_ct_parse(const struct nfnl_ct_hook *nfnl_ct,
const struct nlmsghdr *nlh,
const struct nlattr * const nfqa[],
struct nf_queue_entry *entry,
enum ip_conntrack_info *ctinfo)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
struct nf_conn *ct;
ct = nf_ct_get(entry->skb, ctinfo);
if (ct == NULL)
return NULL;
if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
return NULL;
if (nfqa[NFQA_EXP])
nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
NETLINK_CB(entry->skb).portid,
nlmsg_report(nlh));
return ct;
#else
return NULL;
#endif
}
static int nfqa_parse_bridge(struct nf_queue_entry *entry,
const struct nlattr * const nfqa[])
{
if (nfqa[NFQA_VLAN]) {
struct nlattr *tb[NFQA_VLAN_MAX + 1];
int err;
err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
nfqa[NFQA_VLAN],
nfqa_vlan_policy, NULL);
if (err < 0)
return err;
if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
return -EINVAL;
__vlan_hwaccel_put_tag(entry->skb,
nla_get_be16(tb[NFQA_VLAN_PROTO]),
ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
}
if (nfqa[NFQA_L2HDR]) {
int mac_header_len = entry->skb->network_header -
entry->skb->mac_header;
if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
return -EINVAL;
else if (mac_header_len > 0)
memcpy(skb_mac_header(entry->skb),
nla_data(nfqa[NFQA_L2HDR]),
mac_header_len);
}
return 0;
}
static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nfqa[])
{
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
u_int16_t queue_num = ntohs(info->nfmsg->res_id);
const struct nfnl_ct_hook *nfnl_ct;
struct nfqnl_msg_verdict_hdr *vhdr;
enum ip_conntrack_info ctinfo;
struct nfqnl_instance *queue;
struct nf_queue_entry *entry;
struct nf_conn *ct = NULL;
unsigned int verdict;
int err;
queue = verdict_instance_lookup(q, queue_num,
NETLINK_CB(skb).portid);
if (IS_ERR(queue))
return PTR_ERR(queue);
vhdr = verdicthdr_get(nfqa);
if (!vhdr)
return -EINVAL;
verdict = ntohl(vhdr->verdict);
entry = find_dequeue_entry(queue, ntohl(vhdr->id));
if (entry == NULL)
return -ENOENT;
/* rcu lock already held from nfnl->call_rcu. */
nfnl_ct = rcu_dereference(nfnl_ct_hook);
if (nfqa[NFQA_CT]) {
if (nfnl_ct != NULL)
ct = nfqnl_ct_parse(nfnl_ct, info->nlh, nfqa, entry,
&ctinfo);
}
if (entry->state.pf == PF_BRIDGE) {
err = nfqa_parse_bridge(entry, nfqa);
if (err < 0)
return err;
}
if (nfqa[NFQA_PAYLOAD]) {
u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
int diff = payload_len - entry->skb->len;
if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
payload_len, entry, diff) < 0)
verdict = NF_DROP;
if (ct && diff)
nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
}
if (nfqa[NFQA_MARK])
entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
if (nfqa[NFQA_PRIORITY])
entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));
nfqnl_reinject(entry, verdict);
return 0;
}
static int nfqnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const cda[])
{
return -ENOTSUPP;
}
static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
[NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) },
[NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) },
[NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
[NFQA_CFG_MASK] = { .type = NLA_U32 },
[NFQA_CFG_FLAGS] = { .type = NLA_U32 },
};
static const struct nf_queue_handler nfqh = {
.outfn = nfqnl_enqueue_packet,
.nf_hook_drop = nfqnl_nf_hook_drop,
};
static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nfqa[])
{
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
u_int16_t queue_num = ntohs(info->nfmsg->res_id);
struct nfqnl_msg_config_cmd *cmd = NULL;
struct nfqnl_instance *queue;
__u32 flags = 0, mask = 0;
int ret = 0;
if (nfqa[NFQA_CFG_CMD]) {
cmd = nla_data(nfqa[NFQA_CFG_CMD]);
/* Obsolete commands without queue context */
switch (cmd->command) {
case NFQNL_CFG_CMD_PF_BIND: return 0;
case NFQNL_CFG_CMD_PF_UNBIND: return 0;
}
}
/* Check if we support these flags in the first place; their
 * dependencies must be present too, so that the update stays atomic.
 */
if (nfqa[NFQA_CFG_FLAGS]) {
if (!nfqa[NFQA_CFG_MASK]) {
/* A mask is needed to specify which flags are being
* changed.
*/
return -EINVAL;
}
flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
if (flags >= NFQA_CFG_F_MAX)
return -EOPNOTSUPP;
#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
if (flags & mask & NFQA_CFG_F_SECCTX)
return -EOPNOTSUPP;
#endif
if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
!rcu_access_pointer(nfnl_ct_hook)) {
#ifdef CONFIG_MODULES
nfnl_unlock(NFNL_SUBSYS_QUEUE);
request_module("ip_conntrack_netlink");
nfnl_lock(NFNL_SUBSYS_QUEUE);
if (rcu_access_pointer(nfnl_ct_hook))
return -EAGAIN;
#endif
return -EOPNOTSUPP;
}
}
rcu_read_lock();
queue = instance_lookup(q, queue_num);
if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
ret = -EPERM;
goto err_out_unlock;
}
if (cmd != NULL) {
switch (cmd->command) {
case NFQNL_CFG_CMD_BIND:
if (queue) {
ret = -EBUSY;
goto err_out_unlock;
}
queue = instance_create(q, queue_num,
NETLINK_CB(skb).portid);
if (IS_ERR(queue)) {
ret = PTR_ERR(queue);
goto err_out_unlock;
}
break;
case NFQNL_CFG_CMD_UNBIND:
if (!queue) {
ret = -ENODEV;
goto err_out_unlock;
}
instance_destroy(q, queue);
goto err_out_unlock;
case NFQNL_CFG_CMD_PF_BIND:
case NFQNL_CFG_CMD_PF_UNBIND:
break;
default:
ret = -ENOTSUPP;
goto err_out_unlock;
}
}
if (!queue) {
ret = -ENODEV;
goto err_out_unlock;
}
if (nfqa[NFQA_CFG_PARAMS]) {
struct nfqnl_msg_config_params *params =
nla_data(nfqa[NFQA_CFG_PARAMS]);
nfqnl_set_mode(queue, params->copy_mode,
ntohl(params->copy_range));
}
if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
spin_lock_bh(&queue->lock);
queue->queue_maxlen = ntohl(*queue_maxlen);
spin_unlock_bh(&queue->lock);
}
if (nfqa[NFQA_CFG_FLAGS]) {
spin_lock_bh(&queue->lock);
queue->flags &= ~mask;
queue->flags |= flags & mask;
spin_unlock_bh(&queue->lock);
}
err_out_unlock:
rcu_read_unlock();
return ret;
}
static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
[NFQNL_MSG_PACKET] = {
.call = nfqnl_recv_unsupp,
.type = NFNL_CB_RCU,
.attr_count = NFQA_MAX,
},
[NFQNL_MSG_VERDICT] = {
.call = nfqnl_recv_verdict,
.type = NFNL_CB_RCU,
.attr_count = NFQA_MAX,
.policy = nfqa_verdict_policy
},
[NFQNL_MSG_CONFIG] = {
.call = nfqnl_recv_config,
.type = NFNL_CB_MUTEX,
.attr_count = NFQA_CFG_MAX,
.policy = nfqa_cfg_policy
},
[NFQNL_MSG_VERDICT_BATCH] = {
.call = nfqnl_recv_verdict_batch,
.type = NFNL_CB_RCU,
.attr_count = NFQA_MAX,
.policy = nfqa_verdict_batch_policy
},
};
static const struct nfnetlink_subsystem nfqnl_subsys = {
.name = "nf_queue",
.subsys_id = NFNL_SUBSYS_QUEUE,
.cb_count = NFQNL_MSG_MAX,
.cb = nfqnl_cb,
};
#ifdef CONFIG_PROC_FS
struct iter_state {
struct seq_net_private p;
unsigned int bucket;
};
static struct hlist_node *get_first(struct seq_file *seq)
{
struct iter_state *st = seq->private;
struct net *net;
struct nfnl_queue_net *q;
if (!st)
return NULL;
net = seq_file_net(seq);
q = nfnl_queue_pernet(net);
for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
if (!hlist_empty(&q->instance_table[st->bucket]))
return q->instance_table[st->bucket].first;
}
return NULL;
}
static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
struct iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
h = h->next;
while (!h) {
struct nfnl_queue_net *q;
if (++st->bucket >= INSTANCE_BUCKETS)
return NULL;
q = nfnl_queue_pernet(net);
h = q->instance_table[st->bucket].first;
}
return h;
}
static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
struct hlist_node *head;
head = get_first(seq);
if (head)
while (pos && (head = get_next(seq, head)))
pos--;
return pos ? NULL : head;
}
static void *seq_start(struct seq_file *s, loff_t *pos)
__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
return get_idx(s, *pos);
}
static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
return get_next(s, v);
}
static void seq_stop(struct seq_file *s, void *v)
__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}
static int seq_show(struct seq_file *s, void *v)
{
const struct nfqnl_instance *inst = v;
seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
inst->queue_num,
inst->peer_portid, inst->queue_total,
inst->copy_mode, inst->copy_range,
inst->queue_dropped, inst->queue_user_dropped,
inst->id_sequence, 1);
return 0;
}
static const struct seq_operations nfqnl_seq_ops = {
.start = seq_start,
.next = seq_next,
.stop = seq_stop,
.show = seq_show,
};
#endif /* PROC_FS */
static int __net_init nfnl_queue_net_init(struct net *net)
{
unsigned int i;
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
for (i = 0; i < INSTANCE_BUCKETS; i++)
INIT_HLIST_HEAD(&q->instance_table[i]);
spin_lock_init(&q->instances_lock);
#ifdef CONFIG_PROC_FS
if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
&nfqnl_seq_ops, sizeof(struct iter_state)))
return -ENOMEM;
#endif
return 0;
}
static void __net_exit nfnl_queue_net_exit(struct net *net)
{
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
unsigned int i;
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
for (i = 0; i < INSTANCE_BUCKETS; i++)
WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
}
static struct pernet_operations nfnl_queue_net_ops = {
.init = nfnl_queue_net_init,
.exit = nfnl_queue_net_exit,
.id = &nfnl_queue_net_id,
.size = sizeof(struct nfnl_queue_net),
};
static int __init nfnetlink_queue_init(void)
{
int status;
status = register_pernet_subsys(&nfnl_queue_net_ops);
if (status < 0) {
pr_err("failed to register pernet ops\n");
goto out;
}
netlink_register_notifier(&nfqnl_rtnl_notifier);
status = nfnetlink_subsys_register(&nfqnl_subsys);
if (status < 0) {
pr_err("failed to create netlink socket\n");
goto cleanup_netlink_notifier;
}
status = register_netdevice_notifier(&nfqnl_dev_notifier);
if (status < 0) {
pr_err("failed to register netdevice notifier\n");
goto cleanup_netlink_subsys;
}
nf_register_queue_handler(&nfqh);
return status;
cleanup_netlink_subsys:
nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
netlink_unregister_notifier(&nfqnl_rtnl_notifier);
unregister_pernet_subsys(&nfnl_queue_net_ops);
out:
return status;
}
static void __exit nfnetlink_queue_fini(void)
{
nf_unregister_queue_handler();
unregister_netdevice_notifier(&nfqnl_dev_notifier);
nfnetlink_subsys_unregister(&nfqnl_subsys);
netlink_unregister_notifier(&nfqnl_rtnl_notifier);
unregister_pernet_subsys(&nfnl_queue_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);
| linux-master | net/netfilter/nfnetlink_queue.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_queue.h>
#include <net/ip6_checksum.h>
#ifdef CONFIG_INET
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u8 protocol)
{
const struct iphdr *iph = ip_hdr(skb);
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
break;
if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP &&
!csum_fold(skb->csum)) ||
!csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - dataoff, protocol,
skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
fallthrough;
case CHECKSUM_NONE:
if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
skb->csum = 0;
else
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
skb->len - dataoff,
protocol, 0);
csum = __skb_checksum_complete(skb);
}
return csum;
}
EXPORT_SYMBOL(nf_ip_checksum);
#endif
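/* Illustrative note (not part of the original source): in the
 * CHECKSUM_COMPLETE branch above, csum_tcpudp_magic() folds the
 * pseudo-header (saddr, daddr, length, protocol) into skb->csum; the
 * one's-complement sum of a valid TCP/UDP packet plus its pseudo-header
 * folds to zero, so a zero result lets the skb be marked
 * CHECKSUM_UNNECESSARY.
 */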
static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u8 protocol)
{
const struct iphdr *iph = ip_hdr(skb);
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (len == skb->len - dataoff)
return nf_ip_checksum(skb, hook, dataoff, protocol);
fallthrough;
case CHECKSUM_NONE:
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
skb->len - dataoff, protocol, 0);
skb->ip_summed = CHECKSUM_NONE;
return __skb_checksum_complete_head(skb, dataoff + len);
}
return csum;
}
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u8 protocol)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
break;
if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
skb->len - dataoff, protocol,
csum_sub(skb->csum,
skb_checksum(skb, 0,
dataoff, 0)))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
fallthrough;
case CHECKSUM_NONE:
skb->csum = ~csum_unfold(
csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
skb->len - dataoff,
protocol,
csum_sub(0,
skb_checksum(skb, 0,
dataoff, 0))));
csum = __skb_checksum_complete(skb);
}
return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);
static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u8 protocol)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
__wsum hsum;
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (len == skb->len - dataoff)
return nf_ip6_checksum(skb, hook, dataoff, protocol);
fallthrough;
case CHECKSUM_NONE:
hsum = skb_checksum(skb, 0, dataoff, 0);
skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
&ip6h->daddr,
skb->len - dataoff,
protocol,
csum_sub(0, hsum)));
skb->ip_summed = CHECKSUM_NONE;
return __skb_checksum_complete_head(skb, dataoff + len);
}
return csum;
};
__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u8 protocol,
unsigned short family)
{
__sum16 csum = 0;
switch (family) {
case AF_INET:
csum = nf_ip_checksum(skb, hook, dataoff, protocol);
break;
case AF_INET6:
csum = nf_ip6_checksum(skb, hook, dataoff, protocol);
break;
}
return csum;
}
EXPORT_SYMBOL_GPL(nf_checksum);
__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u8 protocol, unsigned short family)
{
__sum16 csum = 0;
switch (family) {
case AF_INET:
csum = nf_ip_checksum_partial(skb, hook, dataoff, len,
protocol);
break;
case AF_INET6:
csum = nf_ip6_checksum_partial(skb, hook, dataoff, len,
protocol);
break;
}
return csum;
}
EXPORT_SYMBOL_GPL(nf_checksum_partial);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict, unsigned short family)
{
const struct nf_ipv6_ops *v6ops __maybe_unused;
int ret = 0;
switch (family) {
case AF_INET:
ret = nf_ip_route(net, dst, fl, strict);
break;
case AF_INET6:
ret = nf_ip6_route(net, dst, fl, strict);
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(nf_route);
static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
{
#ifdef CONFIG_INET
const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
if (entry->state.hook == NF_INET_LOCAL_OUT) {
const struct iphdr *iph = ip_hdr(skb);
if (!(iph->tos == rt_info->tos &&
skb->mark == rt_info->mark &&
iph->daddr == rt_info->daddr &&
iph->saddr == rt_info->saddr))
return ip_route_me_harder(entry->state.net, entry->state.sk,
skb, RTN_UNSPEC);
}
#endif
return 0;
}
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
{
const struct nf_ipv6_ops *v6ops;
int ret = 0;
switch (entry->state.pf) {
case AF_INET:
ret = nf_ip_reroute(skb, entry);
break;
case AF_INET6:
v6ops = rcu_dereference(nf_ipv6_ops);
if (v6ops)
ret = v6ops->reroute(skb, entry);
break;
}
return ret;
}
/* Only fetch and check the lengths, without doing any hop-by-hop processing. */
int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen)
{
int len, off = sizeof(struct ipv6hdr);
unsigned char *nh;
if (!pskb_may_pull(skb, off + 8))
return -ENOMEM;
nh = (unsigned char *)(ipv6_hdr(skb) + 1);
len = (nh[1] + 1) << 3;
if (!pskb_may_pull(skb, off + len))
return -ENOMEM;
nh = skb_network_header(skb);
off += 2;
len -= 2;
while (len > 0) {
int optlen;
if (nh[off] == IPV6_TLV_PAD1) {
off++;
len--;
continue;
}
if (len < 2)
return -EBADMSG;
optlen = nh[off + 1] + 2;
if (optlen > len)
return -EBADMSG;
if (nh[off] == IPV6_TLV_JUMBO) {
u32 pkt_len;
if (nh[off + 1] != 4 || (off & 3) != 2)
return -EBADMSG;
pkt_len = ntohl(*(__be32 *)(nh + off + 2));
if (pkt_len <= IPV6_MAXPLEN ||
ipv6_hdr(skb)->payload_len)
return -EBADMSG;
if (pkt_len > skb->len - sizeof(struct ipv6hdr))
return -EBADMSG;
*plen = pkt_len;
}
off += optlen;
len -= optlen;
}
return len ? -EBADMSG : 0;
}
EXPORT_SYMBOL_GPL(nf_ip6_check_hbh_len);
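/* Layout sketch (illustrative, not part of the original source): the
 * hop-by-hop header is (nh[1] + 1) * 8 bytes long, so nh[1] == 0 means
 * the minimal 8-byte header; a jumbo payload option (IPV6_TLV_JUMBO)
 * must carry a 4-byte value and sit at an offset with (off & 3) == 2,
 * so that its 32-bit payload length field is 4-byte aligned, which is
 * exactly what the checks above enforce.
 */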
| linux-master | net/netfilter/utils.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999 Jérôme de Vivie <[email protected]>
* (C) 1999 Hervé Eychenne <[email protected]>
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_limit.h>
struct xt_limit_priv {
unsigned long prev;
u32 credit;
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Herve Eychenne <[email protected]>");
MODULE_DESCRIPTION("Xtables: rate-limit match");
MODULE_ALIAS("ipt_limit");
MODULE_ALIAS("ip6t_limit");
/* The algorithm used is the Simple Token Bucket Filter (TBF)
* see net/sched/sch_tbf.c in the linux source tree
*/
/* Rusty: This is my (non-mathematically-inclined) understanding of
this algorithm. The `average rate' in jiffies becomes your initial
amount of credit `credit' and the most credit you can ever have
`credit_cap'. The `peak rate' becomes the cost of passing the
test, `cost'.
`prev' tracks the last packet hit: you gain one credit per jiffy.
If your credit balance would exceed `credit_cap', the extra credit is
discarded. Every time the match passes, you lose `cost' credits;
if you don't have that many, the test fails.
See Alexey's formal explanation in net/sched/sch_tbf.c.
To get the maximum range, we multiply by this factor (ie. you get N
credits per jiffy). We want to allow a rate as low as 1 per day
(slowest userspace tool allows), which means
CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32. ie. */
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
* us the power of 2 below the theoretical max, so GCC simply does a
* shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
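/* Worked example (illustrative, assumes HZ = 1000; not part of the
 * original source): MAX_CPJ = 0xFFFFFFFF / (1000 * 60 * 60 * 24) = 49,
 * and POW2_BELOW32(49) = 32, so CREDITS_PER_JIFFY evaluates to 32:
 * every elapsed jiffy adds 32 credits, and all costs are expressed in
 * that unit without risking 32-bit overflow over a full day.
 */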
static bool
limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rateinfo *r = par->matchinfo;
struct xt_limit_priv *priv = r->master;
unsigned long now;
u32 old_credit, new_credit, credit_increase = 0;
bool ret;
/* fastpath if there is nothing to update */
if ((READ_ONCE(priv->credit) < r->cost) && (READ_ONCE(priv->prev) == jiffies))
return false;
do {
now = jiffies;
credit_increase += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;
old_credit = READ_ONCE(priv->credit);
new_credit = old_credit;
new_credit += credit_increase;
if (new_credit > r->credit_cap)
new_credit = r->credit_cap;
if (new_credit >= r->cost) {
ret = true;
new_credit -= r->cost;
} else {
ret = false;
}
} while (cmpxchg(&priv->credit, old_credit, new_credit) != old_credit);
return ret;
}
/* Precision saver. */
static u32 user2credits(u32 user)
{
/* If multiplying would overflow... */
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
/* Divide first. */
return (user / XT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
}
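/* Worked example (illustrative, assumes HZ = 1000, CREDITS_PER_JIFFY =
 * 32 and XT_LIMIT_SCALE = 10000; not part of the original source): a
 * user rate of 1/second is stored as avg = 10000, and
 * user2credits(10000) = (10000 * 1000 * 32) / 10000 = 32000 =
 * HZ * CREDITS_PER_JIFFY, i.e. exactly one second's worth of credit.
 */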
static int limit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_rateinfo *r = par->matchinfo;
struct xt_limit_priv *priv;
/* Check for overflow. */
if (r->burst == 0
|| user2credits(r->avg * r->burst) < user2credits(r->avg)) {
pr_info_ratelimited("Overflow, try lower: %u/%u\n",
r->avg, r->burst);
return -ERANGE;
}
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
/* For SMP, we only want to use one set of state. */
r->master = priv;
/* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
CREDITS_PER_JIFFY. */
priv->prev = jiffies;
priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
if (r->cost == 0) {
r->credit_cap = priv->credit; /* Credits full. */
r->cost = user2credits(r->avg);
}
return 0;
}
static void limit_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_rateinfo *info = par->matchinfo;
kfree(info->master);
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct compat_xt_rateinfo {
u_int32_t avg;
u_int32_t burst;
compat_ulong_t prev;
u_int32_t credit;
u_int32_t credit_cap, cost;
u_int32_t master;
};
/* To keep the full "prev" timestamp, the upper 32 bits are stored in the
* master pointer, which does not need to be preserved. */
static void limit_mt_compat_from_user(void *dst, const void *src)
{
const struct compat_xt_rateinfo *cm = src;
struct xt_rateinfo m = {
.avg = cm->avg,
.burst = cm->burst,
.prev = cm->prev | (unsigned long)cm->master << 32,
.credit = cm->credit,
.credit_cap = cm->credit_cap,
.cost = cm->cost,
};
memcpy(dst, &m, sizeof(m));
}
static int limit_mt_compat_to_user(void __user *dst, const void *src)
{
const struct xt_rateinfo *m = src;
struct compat_xt_rateinfo cm = {
.avg = m->avg,
.burst = m->burst,
.prev = m->prev,
.credit = m->credit,
.credit_cap = m->credit_cap,
.cost = m->cost,
.master = m->prev >> 32,
};
return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
}
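/* Round-trip sketch (illustrative, not part of the original source):
 * for a 64-bit prev of 0x0000000100000002, compat_to_user stores
 * cm.prev = 0x00000002 and cm.master = 0x00000001, and compat_from_user
 * reassembles 0x00000002 | (0x00000001UL << 32), recovering the full
 * timestamp through the otherwise-unused master field.
 */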
#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
static struct xt_match limit_mt_reg __read_mostly = {
.name = "limit",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = limit_mt,
.checkentry = limit_mt_check,
.destroy = limit_mt_destroy,
.matchsize = sizeof(struct xt_rateinfo),
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
.compatsize = sizeof(struct compat_xt_rateinfo),
.compat_from_user = limit_mt_compat_from_user,
.compat_to_user = limit_mt_compat_to_user,
#endif
.usersize = offsetof(struct xt_rateinfo, prev),
.me = THIS_MODULE,
};
static int __init limit_mt_init(void)
{
return xt_register_match(&limit_mt_reg);
}
static void __exit limit_mt_exit(void)
{
xt_unregister_match(&limit_mt_reg);
}
module_init(limit_mt_init);
module_exit(limit_mt_exit);
| linux-master | net/netfilter/xt_limit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (C) 2011 Pablo Neira Ayuso <[email protected]>
* (C) 2011 Intra2net AG <https://www.intra2net.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netns/generic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_acct.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
struct nf_acct {
atomic64_t pkts;
atomic64_t bytes;
unsigned long flags;
struct list_head head;
refcount_t refcnt;
char name[NFACCT_NAME_MAX];
struct rcu_head rcu_head;
char data[];
};
struct nfacct_filter {
u32 value;
u32 mask;
};
struct nfnl_acct_net {
struct list_head nfnl_acct_list;
};
static unsigned int nfnl_acct_net_id __read_mostly;
static inline struct nfnl_acct_net *nfnl_acct_pernet(struct net *net)
{
return net_generic(net, nfnl_acct_net_id);
}
#define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES)
#define NFACCT_OVERQUOTA_BIT 2 /* NFACCT_F_OVERQUOTA */
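/* Illustrative note (not part of the original source): the bit number
 * 2 corresponds to the flag mask NFACCT_F_OVERQUOTA = (1 << 2);
 * set_bit()/clear_bit()/test_and_set_bit() take bit numbers, while the
 * NFACCT_F_* values stored in nfacct->flags are masks, hence the
 * separate define.
 */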
static int nfnl_acct_new(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const tb[])
{
struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(info->net);
struct nf_acct *nfacct, *matching = NULL;
unsigned int size = 0;
char *acct_name;
u32 flags = 0;
if (!tb[NFACCT_NAME])
return -EINVAL;
acct_name = nla_data(tb[NFACCT_NAME]);
if (strlen(acct_name) == 0)
return -EINVAL;
list_for_each_entry(nfacct, &nfnl_acct_net->nfnl_acct_list, head) {
if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
if (info->nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
matching = nfacct;
break;
}
if (matching) {
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
/* reset counters if you request a replacement. */
atomic64_set(&matching->pkts, 0);
atomic64_set(&matching->bytes, 0);
smp_mb__before_atomic();
/* reset overquota flag if quota is enabled. */
if ((matching->flags & NFACCT_F_QUOTA))
clear_bit(NFACCT_OVERQUOTA_BIT,
&matching->flags);
return 0;
}
return -EBUSY;
}
if (tb[NFACCT_FLAGS]) {
flags = ntohl(nla_get_be32(tb[NFACCT_FLAGS]));
if (flags & ~NFACCT_F_QUOTA)
return -EOPNOTSUPP;
if ((flags & NFACCT_F_QUOTA) == NFACCT_F_QUOTA)
return -EINVAL;
if (flags & NFACCT_F_OVERQUOTA)
return -EINVAL;
if ((flags & NFACCT_F_QUOTA) && !tb[NFACCT_QUOTA])
return -EINVAL;
size += sizeof(u64);
}
nfacct = kzalloc(sizeof(struct nf_acct) + size, GFP_KERNEL);
if (nfacct == NULL)
return -ENOMEM;
if (flags & NFACCT_F_QUOTA) {
u64 *quota = (u64 *)nfacct->data;
*quota = be64_to_cpu(nla_get_be64(tb[NFACCT_QUOTA]));
nfacct->flags = flags;
}
nla_strscpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
if (tb[NFACCT_BYTES]) {
atomic64_set(&nfacct->bytes,
be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
}
if (tb[NFACCT_PKTS]) {
atomic64_set(&nfacct->pkts,
be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
}
refcount_set(&nfacct->refcnt, 1);
list_add_tail_rcu(&nfacct->head, &nfnl_acct_net->nfnl_acct_list);
return 0;
}
static int
nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int event, struct nf_acct *acct)
{
struct nlmsghdr *nlh;
unsigned int flags = portid ? NLM_F_MULTI : 0;
u64 pkts, bytes;
u32 old_flags;
event = nfnl_msg_type(NFNL_SUBSYS_ACCT, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
if (nla_put_string(skb, NFACCT_NAME, acct->name))
goto nla_put_failure;
old_flags = acct->flags;
if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
pkts = atomic64_xchg(&acct->pkts, 0);
bytes = atomic64_xchg(&acct->bytes, 0);
smp_mb__before_atomic();
if (acct->flags & NFACCT_F_QUOTA)
clear_bit(NFACCT_OVERQUOTA_BIT, &acct->flags);
} else {
pkts = atomic64_read(&acct->pkts);
bytes = atomic64_read(&acct->bytes);
}
if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts),
NFACCT_PAD) ||
nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes),
NFACCT_PAD) ||
nla_put_be32(skb, NFACCT_USE, htonl(refcount_read(&acct->refcnt))))
goto nla_put_failure;
if (acct->flags & NFACCT_F_QUOTA) {
u64 *quota = (u64 *)acct->data;
if (nla_put_be32(skb, NFACCT_FLAGS, htonl(old_flags)) ||
nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota),
NFACCT_PAD))
goto nla_put_failure;
}
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int
nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
struct nf_acct *cur, *last;
const struct nfacct_filter *filter = cb->data;
if (cb->args[2])
return 0;
last = (struct nf_acct *)cb->args[1];
if (cb->args[1])
cb->args[1] = 0;
rcu_read_lock();
list_for_each_entry_rcu(cur, &nfnl_acct_net->nfnl_acct_list, head) {
if (last) {
if (cur != last)
continue;
last = NULL;
}
if (filter && (cur->flags & filter->mask) != filter->value)
continue;
if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
NFNL_MSG_ACCT_NEW, cur) < 0) {
cb->args[1] = (unsigned long)cur;
break;
}
}
if (!cb->args[1])
cb->args[2] = 1;
rcu_read_unlock();
return skb->len;
}
static int nfnl_acct_done(struct netlink_callback *cb)
{
kfree(cb->data);
return 0;
}
static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = {
[NFACCT_FILTER_MASK] = { .type = NLA_U32 },
[NFACCT_FILTER_VALUE] = { .type = NLA_U32 },
};
static int nfnl_acct_start(struct netlink_callback *cb)
{
const struct nlattr *const attr = cb->data;
struct nlattr *tb[NFACCT_FILTER_MAX + 1];
struct nfacct_filter *filter;
int err;
if (!attr)
return 0;
err = nla_parse_nested_deprecated(tb, NFACCT_FILTER_MAX, attr,
filter_policy, NULL);
if (err < 0)
return err;
if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE])
return -EINVAL;
filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK]));
filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE]));
cb->data = filter;
return 0;
}
static int nfnl_acct_get(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const tb[])
{
struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(info->net);
int ret = -ENOENT;
struct nf_acct *cur;
char *acct_name;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = nfnl_acct_dump,
.start = nfnl_acct_start,
.done = nfnl_acct_done,
.data = (void *)tb[NFACCT_FILTER],
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
if (!tb[NFACCT_NAME])
return -EINVAL;
acct_name = nla_data(tb[NFACCT_NAME]);
list_for_each_entry(cur, &nfnl_acct_net->nfnl_acct_list, head) {
struct sk_buff *skb2;
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
ret = -ENOMEM;
break;
}
ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type),
NFNL_MSG_ACCT_NEW, cur);
if (ret <= 0) {
kfree_skb(skb2);
break;
}
ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
break;
}
return ret;
}
/* try to delete object, fail if it is still in use. */
static int nfnl_acct_try_del(struct nf_acct *cur)
{
int ret = 0;
/* We want to avoid races with nfnl_acct_put, so we only drop the
 * refcount from 1 straight to 0.
 */
if (refcount_dec_if_one(&cur->refcnt)) {
/* We are protected by nfnl mutex. */
list_del_rcu(&cur->head);
kfree_rcu(cur, rcu_head);
} else {
ret = -EBUSY;
}
return ret;
}
static int nfnl_acct_del(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const tb[])
{
struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(info->net);
struct nf_acct *cur, *tmp;
int ret = -ENOENT;
char *acct_name;
if (!tb[NFACCT_NAME]) {
list_for_each_entry_safe(cur, tmp, &nfnl_acct_net->nfnl_acct_list, head)
nfnl_acct_try_del(cur);
return 0;
}
acct_name = nla_data(tb[NFACCT_NAME]);
list_for_each_entry(cur, &nfnl_acct_net->nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
ret = nfnl_acct_try_del(cur);
if (ret < 0)
return ret;
break;
}
return ret;
}
static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = {
[NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 },
[NFACCT_BYTES] = { .type = NLA_U64 },
[NFACCT_PKTS] = { .type = NLA_U64 },
[NFACCT_FLAGS] = { .type = NLA_U32 },
[NFACCT_QUOTA] = { .type = NLA_U64 },
[NFACCT_FILTER] = {.type = NLA_NESTED },
};
static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = {
[NFNL_MSG_ACCT_NEW] = {
.call = nfnl_acct_new,
.type = NFNL_CB_MUTEX,
.attr_count = NFACCT_MAX,
.policy = nfnl_acct_policy
},
[NFNL_MSG_ACCT_GET] = {
.call = nfnl_acct_get,
.type = NFNL_CB_MUTEX,
.attr_count = NFACCT_MAX,
.policy = nfnl_acct_policy
},
[NFNL_MSG_ACCT_GET_CTRZERO] = {
.call = nfnl_acct_get,
.type = NFNL_CB_MUTEX,
.attr_count = NFACCT_MAX,
.policy = nfnl_acct_policy
},
[NFNL_MSG_ACCT_DEL] = {
.call = nfnl_acct_del,
.type = NFNL_CB_MUTEX,
.attr_count = NFACCT_MAX,
.policy = nfnl_acct_policy
},
};
static const struct nfnetlink_subsystem nfnl_acct_subsys = {
.name = "acct",
.subsys_id = NFNL_SUBSYS_ACCT,
.cb_count = NFNL_MSG_ACCT_MAX,
.cb = nfnl_acct_cb,
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
{
struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
struct nf_acct *cur, *acct = NULL;
rcu_read_lock();
list_for_each_entry_rcu(cur, &nfnl_acct_net->nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
if (!try_module_get(THIS_MODULE))
goto err;
if (!refcount_inc_not_zero(&cur->refcnt)) {
module_put(THIS_MODULE);
goto err;
}
acct = cur;
break;
}
err:
rcu_read_unlock();
return acct;
}
EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
void nfnl_acct_put(struct nf_acct *acct)
{
if (refcount_dec_and_test(&acct->refcnt))
kfree_rcu(acct, rcu_head);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(nfnl_acct_put);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
{
atomic64_inc(&nfacct->pkts);
atomic64_add(skb->len, &nfacct->bytes);
}
EXPORT_SYMBOL_GPL(nfnl_acct_update);
static void nfnl_overquota_report(struct net *net, struct nf_acct *nfacct)
{
int ret;
struct sk_buff *skb;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (skb == NULL)
return;
ret = nfnl_acct_fill_info(skb, 0, 0, NFNL_MSG_ACCT_OVERQUOTA, 0,
nfacct);
if (ret <= 0) {
kfree_skb(skb);
return;
}
nfnetlink_broadcast(net, skb, 0, NFNLGRP_ACCT_QUOTA, GFP_ATOMIC);
}
int nfnl_acct_overquota(struct net *net, struct nf_acct *nfacct)
{
u64 now;
u64 *quota;
int ret = NFACCT_UNDERQUOTA;
/* no place here if we don't have a quota */
if (!(nfacct->flags & NFACCT_F_QUOTA))
return NFACCT_NO_QUOTA;
quota = (u64 *)nfacct->data;
now = (nfacct->flags & NFACCT_F_QUOTA_PKTS) ?
atomic64_read(&nfacct->pkts) : atomic64_read(&nfacct->bytes);
ret = now > *quota;
if (now >= *quota &&
!test_and_set_bit(NFACCT_OVERQUOTA_BIT, &nfacct->flags)) {
nfnl_overquota_report(net, nfacct);
}
return ret;
}
EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
static int __net_init nfnl_acct_net_init(struct net *net)
{
INIT_LIST_HEAD(&nfnl_acct_pernet(net)->nfnl_acct_list);
return 0;
}
static void __net_exit nfnl_acct_net_exit(struct net *net)
{
struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
struct nf_acct *cur, *tmp;
list_for_each_entry_safe(cur, tmp, &nfnl_acct_net->nfnl_acct_list, head) {
list_del_rcu(&cur->head);
if (refcount_dec_and_test(&cur->refcnt))
kfree_rcu(cur, rcu_head);
}
}
static struct pernet_operations nfnl_acct_ops = {
.init = nfnl_acct_net_init,
.exit = nfnl_acct_net_exit,
.id = &nfnl_acct_net_id,
.size = sizeof(struct nfnl_acct_net),
};
static int __init nfnl_acct_init(void)
{
int ret;
ret = register_pernet_subsys(&nfnl_acct_ops);
if (ret < 0) {
pr_err("nfnl_acct_init: failed to register pernet ops\n");
goto err_out;
}
ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
if (ret < 0) {
pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
goto cleanup_pernet;
}
return 0;
cleanup_pernet:
unregister_pernet_subsys(&nfnl_acct_ops);
err_out:
return ret;
}
static void __exit nfnl_acct_exit(void)
{
nfnetlink_subsys_unregister(&nfnl_acct_subsys);
unregister_pernet_subsys(&nfnl_acct_ops);
}
module_init(nfnl_acct_init);
module_exit(nfnl_acct_exit);
| linux-master | net/netfilter/nfnetlink_acct.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_DESCRIPTION("Xtables: TCP, UDP and UDP-Lite match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xt_tcp");
MODULE_ALIAS("xt_udp");
MODULE_ALIAS("ipt_udp");
MODULE_ALIAS("ipt_tcp");
MODULE_ALIAS("ip6t_udp");
MODULE_ALIAS("ip6t_tcp");
MODULE_ALIAS("ipt_icmp");
MODULE_ALIAS("ip6t_icmp6");
/* Returns 1 if the port is matched by the range, 0 otherwise */
static inline bool
port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
{
return (port >= min && port <= max) ^ invert;
}
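/* Example (illustrative): for a rule covering ports 1000-2000,
 *	port_match(1000, 2000, 1500, false) -> true   (in range)
 *	port_match(1000, 2000, 2500, false) -> false  (out of range)
 *	port_match(1000, 2000, 1500, true)  -> false  (match inverted)
 */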
static bool
tcp_find_option(u_int8_t option,
const struct sk_buff *skb,
unsigned int protoff,
unsigned int optlen,
bool invert,
bool *hotdrop)
{
/* tcp.doff is only 4 bits, i.e. max 15 * 4 bytes */
const u_int8_t *op;
u_int8_t _opt[60 - sizeof(struct tcphdr)];
unsigned int i;
pr_debug("finding option\n");
if (!optlen)
return invert;
/* If we don't have the whole header, drop packet. */
op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr),
optlen, _opt);
if (op == NULL) {
*hotdrop = true;
return false;
}
for (i = 0; i < optlen; ) {
if (op[i] == option)
return !invert;
if (op[i] < 2)
i++;
else
i += op[i + 1] ?: 1;
}
return invert;
}
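/* Worked example (illustrative): TCP options are TLVs of the form
 * <kind> or <kind, len, data...>. For MSS (kind 2, len 4) followed by
 * NOP (kind 1) and SACK-permitted (kind 4, len 2):
 *
 *	op[] = { 2, 4, 0x05, 0xb4, 1, 4, 2 }
 *
 * searching for option 4 skips 4 bytes over the MSS TLV (i += op[i+1]),
 * 1 byte over the NOP (kind < 2), then matches at op[5]. The "?: 1"
 * fallback avoids an endless loop on a bogus zero option length.
 */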
static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct tcphdr *th;
struct tcphdr _tcph;
const struct xt_tcp *tcpinfo = par->matchinfo;
if (par->fragoff != 0) {
/* To quote Alan:
Don't allow a fragment of TCP 8 bytes in. Nobody normal
causes this. It's a cracker trying to break in by doing a
flag overwrite to pass the direction checks.
*/
if (par->fragoff == 1) {
pr_debug("Dropping evil TCP offset=1 frag.\n");
par->hotdrop = true;
}
/* Must not be a fragment. */
return false;
}
th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
if (th == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
pr_debug("Dropping evil TCP offset=0 tinygram.\n");
par->hotdrop = true;
return false;
}
if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
ntohs(th->source),
!!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
return false;
if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
ntohs(th->dest),
!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
return false;
if (!NF_INVF(tcpinfo, XT_TCP_INV_FLAGS,
(((unsigned char *)th)[13] & tcpinfo->flg_mask) == tcpinfo->flg_cmp))
return false;
if (tcpinfo->option) {
if (th->doff * 4 < sizeof(_tcph)) {
par->hotdrop = true;
return false;
}
if (!tcp_find_option(tcpinfo->option, skb, par->thoff,
th->doff*4 - sizeof(_tcph),
tcpinfo->invflags & XT_TCP_INV_OPTION,
&par->hotdrop))
return false;
}
return true;
}
static int tcp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_tcp *tcpinfo = par->matchinfo;
/* Must specify no unknown invflags */
return (tcpinfo->invflags & ~XT_TCP_INV_MASK) ? -EINVAL : 0;
}
static bool udp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct udphdr *uh;
struct udphdr _udph;
const struct xt_udp *udpinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
uh = skb_header_pointer(skb, par->thoff, sizeof(_udph), &_udph);
if (uh == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
pr_debug("Dropping evil UDP tinygram.\n");
par->hotdrop = true;
return false;
}
return port_match(udpinfo->spts[0], udpinfo->spts[1],
ntohs(uh->source),
!!(udpinfo->invflags & XT_UDP_INV_SRCPT))
&& port_match(udpinfo->dpts[0], udpinfo->dpts[1],
ntohs(uh->dest),
!!(udpinfo->invflags & XT_UDP_INV_DSTPT));
}
static int udp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_udp *udpinfo = par->matchinfo;
/* Must specify no unknown invflags */
return (udpinfo->invflags & ~XT_UDP_INV_MASK) ? -EINVAL : 0;
}
/* Returns 1 if the type and code are matched by the range, 0 otherwise */
static bool type_code_in_range(u8 test_type, u8 min_code, u8 max_code,
u8 type, u8 code)
{
return type == test_type && code >= min_code && code <= max_code;
}
static bool icmp_type_code_match(u8 test_type, u8 min_code, u8 max_code,
u8 type, u8 code, bool invert)
{
return (test_type == 0xFF ||
type_code_in_range(test_type, min_code, max_code, type, code))
^ invert;
}
static bool icmp6_type_code_match(u8 test_type, u8 min_code, u8 max_code,
u8 type, u8 code, bool invert)
{
return type_code_in_range(test_type, min_code, max_code, type, code) ^ invert;
}
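/* Note the asymmetry (illustrative): for IPv4 a test_type of 0xFF acts as
 * a wildcard matching any ICMP type (presumably how "-p icmp" without
 * "--icmp-type" is encoded by iptables), whereas the IPv6 variant has no
 * wildcard and always compares the type and code range.
 */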
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct icmphdr *ic;
struct icmphdr _icmph;
const struct ipt_icmp *icmpinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
if (!ic) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
par->hotdrop = true;
return false;
}
return icmp_type_code_match(icmpinfo->type,
icmpinfo->code[0],
icmpinfo->code[1],
ic->type, ic->code,
!!(icmpinfo->invflags & IPT_ICMP_INV));
}
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct icmp6hdr *ic;
struct icmp6hdr _icmph;
const struct ip6t_icmp *icmpinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
if (!ic) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
par->hotdrop = true;
return false;
}
return icmp6_type_code_match(icmpinfo->type,
icmpinfo->code[0],
icmpinfo->code[1],
ic->icmp6_type, ic->icmp6_code,
!!(icmpinfo->invflags & IP6T_ICMP_INV));
}
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
const struct ipt_icmp *icmpinfo = par->matchinfo;
return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
static int icmp6_checkentry(const struct xt_mtchk_param *par)
{
const struct ip6t_icmp *icmpinfo = par->matchinfo;
return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
}
static struct xt_match tcpudp_mt_reg[] __read_mostly = {
{
.name = "tcp",
.family = NFPROTO_IPV4,
.checkentry = tcp_mt_check,
.match = tcp_mt,
.matchsize = sizeof(struct xt_tcp),
.proto = IPPROTO_TCP,
.me = THIS_MODULE,
},
{
.name = "tcp",
.family = NFPROTO_IPV6,
.checkentry = tcp_mt_check,
.match = tcp_mt,
.matchsize = sizeof(struct xt_tcp),
.proto = IPPROTO_TCP,
.me = THIS_MODULE,
},
{
.name = "udp",
.family = NFPROTO_IPV4,
.checkentry = udp_mt_check,
.match = udp_mt,
.matchsize = sizeof(struct xt_udp),
.proto = IPPROTO_UDP,
.me = THIS_MODULE,
},
{
.name = "udp",
.family = NFPROTO_IPV6,
.checkentry = udp_mt_check,
.match = udp_mt,
.matchsize = sizeof(struct xt_udp),
.proto = IPPROTO_UDP,
.me = THIS_MODULE,
},
{
.name = "udplite",
.family = NFPROTO_IPV4,
.checkentry = udp_mt_check,
.match = udp_mt,
.matchsize = sizeof(struct xt_udp),
.proto = IPPROTO_UDPLITE,
.me = THIS_MODULE,
},
{
.name = "udplite",
.family = NFPROTO_IPV6,
.checkentry = udp_mt_check,
.match = udp_mt,
.matchsize = sizeof(struct xt_udp),
.proto = IPPROTO_UDPLITE,
.me = THIS_MODULE,
},
{
.name = "icmp",
.match = icmp_match,
.matchsize = sizeof(struct ipt_icmp),
.checkentry = icmp_checkentry,
.proto = IPPROTO_ICMP,
.family = NFPROTO_IPV4,
.me = THIS_MODULE,
},
{
.name = "icmp6",
.match = icmp6_match,
.matchsize = sizeof(struct ip6t_icmp),
.checkentry = icmp6_checkentry,
.proto = IPPROTO_ICMPV6,
.family = NFPROTO_IPV6,
.me = THIS_MODULE,
},
};
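/* Usage sketch (illustrative): these registrations back the classic
 * iptables/ip6tables protocol matches, e.g.
 *
 *	iptables  -A INPUT -p tcp --dport 22 --syn -j ACCEPT
 *	ip6tables -A INPUT -p udp --sport 53 -j ACCEPT
 *
 * where --syn expands to flg_mask = SYN|RST|ACK|FIN, flg_cmp = SYN.
 */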
static int __init tcpudp_mt_init(void)
{
return xt_register_matches(tcpudp_mt_reg, ARRAY_SIZE(tcpudp_mt_reg));
}
static void __exit tcpudp_mt_exit(void)
{
xt_unregister_matches(tcpudp_mt_reg, ARRAY_SIZE(tcpudp_mt_reg));
}
module_init(tcpudp_mt_init);
module_exit(tcpudp_mt_exit);
| linux-master | net/netfilter/xt_tcpudp.c |
/* Connection tracking via netlink socket. Allows for user space
* protocol helpers and general trouble making from userspace.
*
* (C) 2001 by Jay Schulist <[email protected]>
* (C) 2002-2006 by Harald Welte <[email protected]>
* (C) 2003 by Patrick Mchardy <[email protected]>
* (C) 2005-2012 by Pablo Neira Ayuso <[email protected]>
*
* Initial connection tracking via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)
*
* Further development of this code funded by Astaro AG (http://www.astaro.com)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/siphash.h>
#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#endif
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include "nf_internals.h"
MODULE_LICENSE("GPL");
struct ctnetlink_list_dump_ctx {
struct nf_conn *last;
unsigned int cpu;
bool done;
};
static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l4proto *l4proto)
{
int ret = 0;
struct nlattr *nest_parms;
nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
goto nla_put_failure;
if (likely(l4proto->tuple_to_nlattr))
ret = l4proto->tuple_to_nlattr(skb, tuple);
nla_nest_end(skb, nest_parms);
return ret;
nla_put_failure:
return -1;
}
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
return -EMSGSIZE;
return 0;
}
static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
return -EMSGSIZE;
return 0;
}
static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
int ret = 0;
struct nlattr *nest_parms;
nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
if (!nest_parms)
goto nla_put_failure;
switch (tuple->src.l3num) {
case NFPROTO_IPV4:
ret = ipv4_tuple_to_nlattr(skb, tuple);
break;
case NFPROTO_IPV6:
ret = ipv6_tuple_to_nlattr(skb, tuple);
break;
}
nla_nest_end(skb, nest_parms);
return ret;
nla_put_failure:
return -1;
}
static int ctnetlink_dump_tuples(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
const struct nf_conntrack_l4proto *l4proto;
int ret;
rcu_read_lock();
ret = ctnetlink_dump_tuples_ip(skb, tuple);
if (ret >= 0) {
l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
}
rcu_read_unlock();
return ret;
}
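/* Resulting attribute layout (illustrative), e.g. for a TCP/IPv4 tuple:
 *
 *	CTA_TUPLE_ORIG (nested)
 *	  CTA_TUPLE_IP (nested)
 *	    CTA_IP_V4_SRC, CTA_IP_V4_DST
 *	  CTA_TUPLE_PROTO (nested)
 *	    CTA_PROTO_NUM = IPPROTO_TCP
 *	    CTA_PROTO_SRC_PORT, CTA_PROTO_DST_PORT (from tuple_to_nlattr)
 */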
static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
const struct nf_conntrack_zone *zone, int dir)
{
if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
return 0;
if (nla_put_be16(skb, attrtype, htons(zone->id)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
bool skip_zero)
{
long timeout;
if (nf_ct_is_confirmed(ct))
timeout = nf_ct_expires(ct) / HZ;
else
timeout = ct->timeout / HZ;
if (skip_zero && timeout == 0)
return 0;
if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct,
bool destroy)
{
const struct nf_conntrack_l4proto *l4proto;
struct nlattr *nest_proto;
int ret;
l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (!l4proto->to_nlattr)
return 0;
nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
if (!nest_proto)
goto nla_put_failure;
ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy);
nla_nest_end(skb, nest_proto);
return ret;
nla_put_failure:
return -1;
}
static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
const struct nf_conn *ct)
{
struct nlattr *nest_helper;
const struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_helper *helper;
if (!help)
return 0;
rcu_read_lock();
helper = rcu_dereference(help->helper);
if (!helper)
goto out;
nest_helper = nla_nest_start(skb, CTA_HELP);
if (!nest_helper)
goto nla_put_failure;
if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
goto nla_put_failure;
if (helper->to_nlattr)
helper->to_nlattr(skb, ct);
nla_nest_end(skb, nest_helper);
out:
rcu_read_unlock();
return 0;
nla_put_failure:
rcu_read_unlock();
return -1;
}
static int
dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
enum ip_conntrack_dir dir, int type)
{
enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
struct nf_conn_counter *counter = acct->counter;
struct nlattr *nest_count;
u64 pkts, bytes;
if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
pkts = atomic64_xchg(&counter[dir].packets, 0);
bytes = atomic64_xchg(&counter[dir].bytes, 0);
} else {
pkts = atomic64_read(&counter[dir].packets);
bytes = atomic64_read(&counter[dir].bytes);
}
nest_count = nla_nest_start(skb, attr);
if (!nest_count)
goto nla_put_failure;
if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
CTA_COUNTERS_PAD) ||
nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
CTA_COUNTERS_PAD))
goto nla_put_failure;
nla_nest_end(skb, nest_count);
return 0;
nla_put_failure:
return -1;
}
static int
ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
{
struct nf_conn_acct *acct = nf_conn_acct_find(ct);
if (!acct)
return 0;
if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
return -1;
if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
return -1;
return 0;
}
static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
struct nlattr *nest_count;
const struct nf_conn_tstamp *tstamp;
tstamp = nf_conn_tstamp_find(ct);
if (!tstamp)
return 0;
nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
if (!nest_count)
goto nla_put_failure;
if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
CTA_TIMESTAMP_PAD) ||
(tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
cpu_to_be64(tstamp->stop),
CTA_TIMESTAMP_PAD)))
goto nla_put_failure;
nla_nest_end(skb, nest_count);
return 0;
nla_put_failure:
return -1;
}
#ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
bool dump)
{
u32 mark = READ_ONCE(ct->mark);
if (!mark && !dump)
return 0;
if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
#else
#define ctnetlink_dump_mark(a, b, c) (0)
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
struct nlattr *nest_secctx;
int len, ret;
char *secctx;
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
return 0;
ret = -1;
nest_secctx = nla_nest_start(skb, CTA_SECCTX);
if (!nest_secctx)
goto nla_put_failure;
if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
goto nla_put_failure;
nla_nest_end(skb, nest_secctx);
ret = 0;
nla_put_failure:
security_release_secctx(secctx, len);
return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
struct nf_conn_labels *labels = nf_ct_labels_find(ct);
if (!labels)
return 0;
return nla_total_size(sizeof(labels->bits));
}
static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
struct nf_conn_labels *labels = nf_ct_labels_find(ct);
unsigned int i;
if (!labels)
return 0;
i = 0;
do {
if (labels->bits[i] != 0)
return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
labels->bits);
i++;
} while (i < ARRAY_SIZE(labels->bits));
return 0;
}
#else
#define ctnetlink_dump_labels(a, b) (0)
#define ctnetlink_label_size(a) (0)
#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
struct nlattr *nest_parms;
if (!(ct->status & IPS_EXPECTED))
return 0;
nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
return -1;
}
static int
dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
{
struct nlattr *nest_parms;
nest_parms = nla_nest_start(skb, type);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
htonl(seq->correction_pos)) ||
nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
htonl(seq->offset_before)) ||
nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
htonl(seq->offset_after)))
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
return -1;
}
static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
{
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
struct nf_ct_seqadj *seq;
if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
return 0;
spin_lock_bh(&ct->lock);
seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
goto err;
seq = &seqadj->seq[IP_CT_DIR_REPLY];
if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
goto err;
spin_unlock_bh(&ct->lock);
return 0;
err:
spin_unlock_bh(&ct->lock);
return -1;
}
static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
{
struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
struct nlattr *nest_parms;
if (!synproxy)
return 0;
nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
return -1;
}
static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
__be32 id = (__force __be32)nf_ct_get_id(ct);
if (nla_put_be32(skb, CTA_ID, id))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
/* all these functions access ct->ext. Caller must either hold a reference
* on ct or prevent its deletion by holding either the bucket spinlock or
* pcpu dying list lock.
*/
static int ctnetlink_dump_extinfo(struct sk_buff *skb,
struct nf_conn *ct, u32 type)
{
if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
ctnetlink_dump_timestamp(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
ctnetlink_dump_labels(skb, ct) < 0 ||
ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
ctnetlink_dump_ct_synproxy(skb, ct) < 0)
return -1;
return 0;
}
static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{
if (ctnetlink_dump_status(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct, true) < 0 ||
ctnetlink_dump_secctx(skb, ct) < 0 ||
ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 ||
ctnetlink_dump_master(skb, ct) < 0)
return -1;
if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
(ctnetlink_dump_timeout(skb, ct, false) < 0 ||
ctnetlink_dump_protoinfo(skb, ct, false) < 0))
return -1;
return 0;
}
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct nf_conn *ct, bool extinfo, unsigned int flags)
{
const struct nf_conntrack_zone *zone;
struct nlmsghdr *nlh;
struct nlattr *nest_parms;
unsigned int event;
if (portid)
flags |= NLM_F_MULTI;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct),
NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
zone = nf_ct_zone(ct);
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
NF_CT_ZONE_DIR_ORIG) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
NF_CT_ZONE_DIR_REPL) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
NF_CT_DEFAULT_ZONE_DIR) < 0)
goto nla_put_failure;
if (ctnetlink_dump_info(skb, ct) < 0)
goto nla_put_failure;
if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
[CTA_IP_V4_SRC] = { .type = NLA_U32 },
[CTA_IP_V4_DST] = { .type = NLA_U32 },
[CTA_IP_V6_SRC] = { .len = sizeof(__be32) * 4 },
[CTA_IP_V6_DST] = { .len = sizeof(__be32) * 4 },
};
#if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
size_t len, len4 = 0;
len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
len *= 3u; /* ORIG, REPLY, MASTER */
l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
len += l4proto->nlattr_size;
if (l4proto->nlattr_tuple_size) {
len4 = l4proto->nlattr_tuple_size();
len4 *= 3u; /* ORIG, REPLY, MASTER */
}
return len + len4;
}
#endif
static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{
if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
return 0;
return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
+ 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
+ 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
;
}
static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
int len, ret;
ret = security_secid_to_secctx(ct->secmark, NULL, &len);
if (ret)
return 0;
return nla_total_size(0) /* CTA_SECCTX */
+ nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
#else
return 0;
#endif
}
static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
return 0;
return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
#else
return 0;
#endif
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
return NLMSG_ALIGN(sizeof(struct nfgenmsg))
+ 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
+ 3 * nla_total_size(0) /* CTA_TUPLE_IP */
+ 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
+ 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
+ ctnetlink_acct_size(ct)
+ ctnetlink_timestamp_size(ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ nla_total_size(0) /* CTA_PROTOINFO */
+ nla_total_size(0) /* CTA_HELP */
+ nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
+ ctnetlink_secctx_size(ct)
#if IS_ENABLED(CONFIG_NF_NAT)
+ 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
+ 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
+ nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
+ nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
+ ctnetlink_proto_size(ct)
+ ctnetlink_label_size(ct)
;
}
static int
ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
{
const struct nf_conntrack_zone *zone;
struct net *net;
struct nlmsghdr *nlh;
struct nlattr *nest_parms;
struct nf_conn *ct = item->ct;
struct sk_buff *skb;
unsigned int type;
unsigned int flags = 0, group;
int err;
if (events & (1 << IPCT_DESTROY)) {
type = IPCTNL_MSG_CT_DELETE;
group = NFNLGRP_CONNTRACK_DESTROY;
} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
type = IPCTNL_MSG_CT_NEW;
flags = NLM_F_CREATE|NLM_F_EXCL;
group = NFNLGRP_CONNTRACK_NEW;
} else if (events) {
type = IPCTNL_MSG_CT_NEW;
group = NFNLGRP_CONNTRACK_UPDATE;
} else
return 0;
net = nf_ct_net(ct);
if (!item->report && !nfnetlink_has_listeners(net, group))
return 0;
skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
if (skb == NULL)
goto errout;
type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct),
NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
zone = nf_ct_zone(ct);
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
NF_CT_ZONE_DIR_ORIG) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
NF_CT_ZONE_DIR_REPL) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
NF_CT_DEFAULT_ZONE_DIR) < 0)
goto nla_put_failure;
if (ctnetlink_dump_id(skb, ct) < 0)
goto nla_put_failure;
if (ctnetlink_dump_status(skb, ct) < 0)
goto nla_put_failure;
if (events & (1 << IPCT_DESTROY)) {
if (ctnetlink_dump_timeout(skb, ct, true) < 0)
goto nla_put_failure;
if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
ctnetlink_dump_timestamp(skb, ct) < 0 ||
ctnetlink_dump_protoinfo(skb, ct, true) < 0)
goto nla_put_failure;
} else {
if (ctnetlink_dump_timeout(skb, ct, false) < 0)
goto nla_put_failure;
if (events & (1 << IPCT_PROTOINFO) &&
ctnetlink_dump_protoinfo(skb, ct, false) < 0)
goto nla_put_failure;
if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
&& ctnetlink_dump_helpinfo(skb, ct) < 0)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if ((events & (1 << IPCT_SECMARK) || ct->secmark)
&& ctnetlink_dump_secctx(skb, ct) < 0)
goto nla_put_failure;
#endif
if (events & (1 << IPCT_LABEL) &&
ctnetlink_dump_labels(skb, ct) < 0)
goto nla_put_failure;
if (events & (1 << IPCT_RELATED) &&
ctnetlink_dump_master(skb, ct) < 0)
goto nla_put_failure;
if (events & (1 << IPCT_SEQADJ) &&
ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
goto nla_put_failure;
if (events & (1 << IPCT_SYNPROXY) &&
ctnetlink_dump_ct_synproxy(skb, ct) < 0)
goto nla_put_failure;
}
#ifdef CONFIG_NF_CONNTRACK_MARK
if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
goto nla_put_failure;
#endif
nlmsg_end(skb, nlh);
err = nfnetlink_send(skb, net, item->portid, group, item->report,
GFP_ATOMIC);
if (err == -ENOBUFS || err == -EAGAIN)
return -ENOBUFS;
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
nlmsg_failure:
kfree_skb(skb);
errout:
if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
return -ENOBUFS;
return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
static int ctnetlink_done(struct netlink_callback *cb)
{
if (cb->args[1])
nf_ct_put((struct nf_conn *)cb->args[1]);
kfree(cb->data);
return 0;
}
struct ctnetlink_filter_u32 {
u32 val;
u32 mask;
};
struct ctnetlink_filter {
u8 family;
u_int32_t orig_flags;
u_int32_t reply_flags;
struct nf_conntrack_tuple orig;
struct nf_conntrack_tuple reply;
struct nf_conntrack_zone zone;
struct ctnetlink_filter_u32 mark;
struct ctnetlink_filter_u32 status;
};
static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
[CTA_FILTER_ORIG_FLAGS] = { .type = NLA_U32 },
[CTA_FILTER_REPLY_FLAGS] = { .type = NLA_U32 },
};
static int ctnetlink_parse_filter(const struct nlattr *attr,
struct ctnetlink_filter *filter)
{
struct nlattr *tb[CTA_FILTER_MAX + 1];
int ret = 0;
ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
NULL);
if (ret)
return ret;
if (tb[CTA_FILTER_ORIG_FLAGS]) {
filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
if (filter->orig_flags & ~CTA_FILTER_F_ALL)
return -EOPNOTSUPP;
}
if (tb[CTA_FILTER_REPLY_FLAGS]) {
filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
if (filter->reply_flags & ~CTA_FILTER_F_ALL)
return -EOPNOTSUPP;
}
return 0;
}
static int ctnetlink_parse_zone(const struct nlattr *attr,
struct nf_conntrack_zone *zone);
static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple,
u32 type, u_int8_t l3num,
struct nf_conntrack_zone *zone,
u_int32_t flags);
static int ctnetlink_filter_parse_mark(struct ctnetlink_filter_u32 *mark,
const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_MARK
if (cda[CTA_MARK]) {
mark->val = ntohl(nla_get_be32(cda[CTA_MARK]));
if (cda[CTA_MARK_MASK])
mark->mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
else
mark->mask = 0xffffffff;
} else if (cda[CTA_MARK_MASK]) {
return -EINVAL;
}
#endif
return 0;
}
static int ctnetlink_filter_parse_status(struct ctnetlink_filter_u32 *status,
const struct nlattr * const cda[])
{
if (cda[CTA_STATUS]) {
status->val = ntohl(nla_get_be32(cda[CTA_STATUS]));
if (cda[CTA_STATUS_MASK])
status->mask = ntohl(nla_get_be32(cda[CTA_STATUS_MASK]));
else
status->mask = status->val;
/* mask == 0 is degenerate: the filter would match everything (val == 0) or nothing (val != 0). */
if (status->mask == 0)
return -EINVAL;
} else if (cda[CTA_STATUS_MASK]) {
return -EINVAL;
}
/* CTA_STATUS is NLA_U32, if this fires UAPI needs to be extended */
BUILD_BUG_ON(__IPS_MAX_BIT >= 32);
return 0;
}
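/* Example (illustrative): CTA_STATUS = IPS_ASSURED with no
 * CTA_STATUS_MASK makes the mask default to the value, so the dump filter
 * becomes "(ct->status & IPS_ASSURED) == IPS_ASSURED", i.e. list only
 * assured conntracks. CTA_STATUS = 0 on its own is rejected since the
 * zero mask would be degenerate.
 */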
static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
{
struct ctnetlink_filter *filter;
int err;
#ifndef CONFIG_NF_CONNTRACK_MARK
if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
return ERR_PTR(-EOPNOTSUPP);
#endif
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (filter == NULL)
return ERR_PTR(-ENOMEM);
filter->family = family;
err = ctnetlink_filter_parse_mark(&filter->mark, cda);
if (err)
goto err_filter;
err = ctnetlink_filter_parse_status(&filter->status, cda);
if (err)
goto err_filter;
if (!cda[CTA_FILTER])
return filter;
err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
if (err < 0)
goto err_filter;
err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
if (err < 0)
goto err_filter;
if (filter->orig_flags) {
if (!cda[CTA_TUPLE_ORIG]) {
err = -EINVAL;
goto err_filter;
}
err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
CTA_TUPLE_ORIG,
filter->family,
&filter->zone,
filter->orig_flags);
if (err < 0)
goto err_filter;
}
if (filter->reply_flags) {
if (!cda[CTA_TUPLE_REPLY]) {
err = -EINVAL;
goto err_filter;
}
err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
CTA_TUPLE_REPLY,
filter->family,
&filter->zone,
filter->reply_flags);
if (err < 0)
goto err_filter;
}
return filter;
err_filter:
kfree(filter);
return ERR_PTR(err);
}
static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
{
return family || cda[CTA_MARK] || cda[CTA_FILTER] || cda[CTA_STATUS];
}
static int ctnetlink_start(struct netlink_callback *cb)
{
const struct nlattr * const *cda = cb->data;
struct ctnetlink_filter *filter = NULL;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u8 family = nfmsg->nfgen_family;
if (ctnetlink_needs_filter(family, cda)) {
filter = ctnetlink_alloc_filter(cda, family);
if (IS_ERR(filter))
return PTR_ERR(filter);
}
cb->data = filter;
return 0;
}
static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
struct nf_conntrack_tuple *ct_tuple,
u_int32_t flags, int family)
{
switch (family) {
case NFPROTO_IPV4:
if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
return 0;
break;
case NFPROTO_IPV6:
if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
!ipv6_addr_cmp(&filter_tuple->src.u3.in6,
&ct_tuple->src.u3.in6))
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
!ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
&ct_tuple->dst.u3.in6))
return 0;
break;
}
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
filter_tuple->dst.protonum != ct_tuple->dst.protonum)
return 0;
switch (ct_tuple->dst.protonum) {
case IPPROTO_TCP:
case IPPROTO_UDP:
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
return 0;
break;
case IPPROTO_ICMP:
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
return 0;
break;
case IPPROTO_ICMPV6:
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
return 0;
break;
}
return 1;
}
static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
{
struct ctnetlink_filter *filter = data;
struct nf_conntrack_tuple *tuple;
u32 status;
if (filter == NULL)
goto out;
/* Match entries of a given L3 protocol number.
* If it is not specified, i.e. l3proto == 0,
* then match everything.
*/
if (filter->family && nf_ct_l3num(ct) != filter->family)
goto ignore_entry;
if (filter->orig_flags) {
tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
filter->orig_flags,
filter->family))
goto ignore_entry;
}
if (filter->reply_flags) {
tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
filter->reply_flags,
filter->family))
goto ignore_entry;
}
#ifdef CONFIG_NF_CONNTRACK_MARK
if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val)
goto ignore_entry;
#endif
status = (u32)READ_ONCE(ct->status);
if ((status & filter->status.mask) != filter->status.val)
goto ignore_entry;
out:
return 1;
ignore_entry:
return 0;
}
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
struct net *net = sock_net(skb->sk);
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
struct nf_conn *nf_ct_evict[8];
int res, i;
spinlock_t *lockp;
last = (struct nf_conn *)cb->args[1];
i = 0;
local_bh_disable();
for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
while (i) {
i--;
if (nf_ct_should_gc(nf_ct_evict[i]))
nf_ct_kill(nf_ct_evict[i]);
nf_ct_put(nf_ct_evict[i]);
}
lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
nf_conntrack_lock(lockp);
if (cb->args[0] >= nf_conntrack_htable_size) {
spin_unlock(lockp);
goto out;
}
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_expired(ct)) {
/* need to defer nf_ct_kill() until lock is released */
if (i < ARRAY_SIZE(nf_ct_evict) &&
refcount_inc_not_zero(&ct->ct_general.use))
nf_ct_evict[i++] = ct;
continue;
}
if (!net_eq(net, nf_ct_net(ct)))
continue;
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
if (cb->args[1]) {
if (ct != last)
continue;
cb->args[1] = 0;
}
if (!ctnetlink_filter_match(ct, cb->data))
continue;
res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
ct, true, flags);
if (res < 0) {
nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
spin_unlock(lockp);
goto out;
}
}
spin_unlock(lockp);
if (cb->args[1]) {
cb->args[1] = 0;
goto restart;
}
}
out:
local_bh_enable();
if (last) {
/* nf ct hash resize happened, now clear the leftover. */
if ((struct nf_conn *)cb->args[1] == last)
cb->args[1] = 0;
nf_ct_put(last);
}
while (i) {
i--;
if (nf_ct_should_gc(nf_ct_evict[i]))
nf_ct_kill(nf_ct_evict[i]);
nf_ct_put(nf_ct_evict[i]);
}
return skb->len;
}
static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t,
u_int32_t flags)
{
if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
if (!tb[CTA_IP_V4_SRC])
return -EINVAL;
t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
}
if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
if (!tb[CTA_IP_V4_DST])
return -EINVAL;
t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
}
return 0;
}
static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t,
u_int32_t flags)
{
if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
if (!tb[CTA_IP_V6_SRC])
return -EINVAL;
t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
}
if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
if (!tb[CTA_IP_V6_DST])
return -EINVAL;
t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
}
return 0;
}
static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
struct nf_conntrack_tuple *tuple,
u_int32_t flags)
{
struct nlattr *tb[CTA_IP_MAX+1];
int ret = 0;
ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr,
cta_ip_nla_policy, NULL);
if (ret < 0)
return ret;
switch (tuple->src.l3num) {
case NFPROTO_IPV4:
ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
break;
case NFPROTO_IPV6:
ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
break;
}
return ret;
}
static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
[CTA_PROTO_NUM] = { .type = NLA_U8 },
};
static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
struct nf_conntrack_tuple *tuple,
u_int32_t flags)
{
const struct nf_conntrack_l4proto *l4proto;
struct nlattr *tb[CTA_PROTO_MAX+1];
int ret = 0;
ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
proto_nla_policy, NULL);
if (ret < 0)
return ret;
if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
return 0;
if (!tb[CTA_PROTO_NUM])
return -EINVAL;
tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
rcu_read_lock();
l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
if (likely(l4proto->nlattr_to_tuple)) {
ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
l4proto->nla_policy,
NULL);
if (ret == 0)
ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
}
rcu_read_unlock();
return ret;
}
static int
ctnetlink_parse_zone(const struct nlattr *attr,
struct nf_conntrack_zone *zone)
{
nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
NF_CT_DEFAULT_ZONE_DIR, 0);
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (attr)
zone->id = ntohs(nla_get_be16(attr));
#else
if (attr)
return -EOPNOTSUPP;
#endif
return 0;
}
static int
ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
struct nf_conntrack_zone *zone)
{
int ret;
if (zone->id != NF_CT_DEFAULT_ZONE_ID)
return -EINVAL;
ret = ctnetlink_parse_zone(attr, zone);
if (ret < 0)
return ret;
if (type == CTA_TUPLE_REPLY)
zone->dir = NF_CT_ZONE_DIR_REPL;
else
zone->dir = NF_CT_ZONE_DIR_ORIG;
return 0;
}
static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
[CTA_TUPLE_IP] = { .type = NLA_NESTED },
[CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
[CTA_TUPLE_ZONE] = { .type = NLA_U16 },
};
#define CTA_FILTER_F_ALL_CTA_PROTO \
(CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
CTA_FILTER_F_CTA_PROTO_DST_PORT | \
CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)
static int
ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple, u32 type,
u_int8_t l3num, struct nf_conntrack_zone *zone,
u_int32_t flags)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
int err;
memset(tuple, 0, sizeof(*tuple));
err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
tuple_nla_policy, NULL);
if (err < 0)
return err;
if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
return -EOPNOTSUPP;
tuple->src.l3num = l3num;
if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
if (!tb[CTA_TUPLE_IP])
return -EINVAL;
err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
if (err < 0)
return err;
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
if (!tb[CTA_TUPLE_PROTO])
return -EINVAL;
err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
if (err < 0)
return err;
} else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
/* Can't manage proto flags without a protonum */
return -EINVAL;
}
if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
if (!zone)
return -EINVAL;
err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
type, zone);
if (err < 0)
return err;
}
/* orig and expect tuples get DIR_ORIGINAL */
if (type == CTA_TUPLE_REPLY)
tuple->dst.dir = IP_CT_DIR_REPLY;
else
tuple->dst.dir = IP_CT_DIR_ORIGINAL;
return 0;
}
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple, u32 type,
u_int8_t l3num, struct nf_conntrack_zone *zone)
{
return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
CTA_FILTER_FLAG(ALL));
}
static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
[CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN - 1 },
};
static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
struct nlattr **helpinfo)
{
int err;
struct nlattr *tb[CTA_HELP_MAX+1];
err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
help_nla_policy, NULL);
if (err < 0)
return err;
if (!tb[CTA_HELP_NAME])
return -EINVAL;
*helper_name = nla_data(tb[CTA_HELP_NAME]);
if (tb[CTA_HELP_INFO])
*helpinfo = tb[CTA_HELP_INFO];
return 0;
}
static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
[CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
[CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
[CTA_STATUS] = { .type = NLA_U32 },
[CTA_PROTOINFO] = { .type = NLA_NESTED },
[CTA_HELP] = { .type = NLA_NESTED },
[CTA_NAT_SRC] = { .type = NLA_NESTED },
[CTA_TIMEOUT] = { .type = NLA_U32 },
[CTA_MARK] = { .type = NLA_U32 },
[CTA_ID] = { .type = NLA_U32 },
[CTA_NAT_DST] = { .type = NLA_NESTED },
[CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
[CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
[CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
[CTA_ZONE] = { .type = NLA_U16 },
[CTA_MARK_MASK] = { .type = NLA_U32 },
[CTA_LABELS] = { .type = NLA_BINARY,
.len = NF_CT_LABELS_MAX_SIZE },
[CTA_LABELS_MASK] = { .type = NLA_BINARY,
.len = NF_CT_LABELS_MAX_SIZE },
[CTA_FILTER] = { .type = NLA_NESTED },
[CTA_STATUS_MASK] = { .type = NLA_U32 },
};
static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
{
return ctnetlink_filter_match(ct, data);
}
static int ctnetlink_flush_conntrack(struct net *net,
const struct nlattr * const cda[],
u32 portid, int report, u8 family)
{
struct ctnetlink_filter *filter = NULL;
struct nf_ct_iter_data iter = {
.net = net,
.portid = portid,
.report = report,
};
if (ctnetlink_needs_filter(family, cda)) {
if (cda[CTA_FILTER])
return -EOPNOTSUPP;
filter = ctnetlink_alloc_filter(cda, family);
if (IS_ERR(filter))
return PTR_ERR(filter);
iter.data = filter;
}
nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter);
kfree(filter);
return 0;
}
static int ctnetlink_del_conntrack(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
u8 family = info->nfmsg->nfgen_family;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
struct nf_conn *ct;
int err;
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
if (err < 0)
return err;
if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
family, &zone);
else if (cda[CTA_TUPLE_REPLY])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
family, &zone);
else {
u_int8_t u3 = info->nfmsg->version ? family : AF_UNSPEC;
return ctnetlink_flush_conntrack(info->net, cda,
NETLINK_CB(skb).portid,
nlmsg_report(info->nlh), u3);
}
if (err < 0)
return err;
h = nf_conntrack_find_get(info->net, &zone, &tuple);
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
if (cda[CTA_ID]) {
__be32 id = nla_get_be32(cda[CTA_ID]);
if (id != (__force __be32)nf_ct_get_id(ct)) {
nf_ct_put(ct);
return -ENOENT;
}
}
nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh));
nf_ct_put(ct);
return 0;
}
static int ctnetlink_get_conntrack(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
struct sk_buff *skb2;
struct nf_conn *ct;
int err;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = ctnetlink_start,
.dump = ctnetlink_dump_table,
.done = ctnetlink_done,
.data = (void *)cda,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
if (err < 0)
return err;
if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
u3, &zone);
else if (cda[CTA_TUPLE_REPLY])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
u3, &zone);
else
return -EINVAL;
if (err < 0)
return err;
h = nf_conntrack_find_get(info->net, &zone, &tuple);
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb2) {
nf_ct_put(ct);
return -ENOMEM;
}
err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type), ct,
true, 0);
nf_ct_put(ct);
if (err <= 0) {
kfree_skb(skb2);
return -ENOMEM;
}
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
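/* Userspace view (illustrative sketch): this handler serves both paths of
 * conntrack(8), roughly:
 *
 *	conntrack -L                       # NLM_F_DUMP -> ctnetlink_dump_table
 *	conntrack -G -p tcp -s 1.2.3.4 ... # full tuple -> nf_conntrack_find_get
 */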
static int ctnetlink_done_list(struct netlink_callback *cb)
{
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
if (ctx->last)
nf_ct_put(ctx->last);
return 0;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_dump_one_entry(struct sk_buff *skb,
struct netlink_callback *cb,
struct nf_conn *ct,
bool dying)
{
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u8 l3proto = nfmsg->nfgen_family;
int res;
if (l3proto && nf_ct_l3num(ct) != l3proto)
return 0;
if (ctx->last) {
if (ct != ctx->last)
return 0;
ctx->last = NULL;
}
/* We can't dump extension info for the unconfirmed
* list because unconfirmed conntracks can have
* ct->ext reallocated (and thus freed).
*
* In the dying list case ct->ext can't be freed
* until after we drop the dying list lock.
*/
res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
ct, dying, 0);
if (res < 0) {
if (!refcount_inc_not_zero(&ct->ct_general.use))
return 0;
ctx->last = ct;
}
return res;
}
#endif
static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
return 0;
}
static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
struct nf_conn *last = ctx->last;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
const struct net *net = sock_net(skb->sk);
struct nf_conntrack_net_ecache *ecache_net;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
#endif
if (ctx->done)
return 0;
ctx->last = NULL;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ecache_net = nf_conn_pernet_ecache(net);
spin_lock_bh(&ecache_net->dying_lock);
hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
struct nf_conn *ct;
int res;
ct = nf_ct_tuplehash_to_ctrack(h);
if (last && last != ct)
continue;
res = ctnetlink_dump_one_entry(skb, cb, ct, true);
if (res < 0) {
spin_unlock_bh(&ecache_net->dying_lock);
nf_ct_put(last);
return skb->len;
}
nf_ct_put(last);
last = NULL;
}
spin_unlock_bh(&ecache_net->dying_lock);
#endif
ctx->done = true;
nf_ct_put(last);
return skb->len;
}
static int ctnetlink_get_ct_dying(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ctnetlink_dump_dying,
.done = ctnetlink_done_list,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
return -EOPNOTSUPP;
}
static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ctnetlink_dump_unconfirmed,
.done = ctnetlink_done_list,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
return -EOPNOTSUPP;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
__must_hold(RCU)
{
const struct nf_nat_hook *nat_hook;
int err;
nat_hook = rcu_dereference(nf_nat_hook);
if (!nat_hook) {
#ifdef CONFIG_MODULES
rcu_read_unlock();
nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
if (request_module("nf-nat") < 0) {
nfnl_lock(NFNL_SUBSYS_CTNETLINK);
rcu_read_lock();
return -EOPNOTSUPP;
}
nfnl_lock(NFNL_SUBSYS_CTNETLINK);
rcu_read_lock();
nat_hook = rcu_dereference(nf_nat_hook);
if (nat_hook)
return -EAGAIN;
#endif
return -EOPNOTSUPP;
}
err = nat_hook->parse_nat_setup(ct, manip, attr);
if (err == -EAGAIN) {
#ifdef CONFIG_MODULES
rcu_read_unlock();
nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
nfnl_lock(NFNL_SUBSYS_CTNETLINK);
rcu_read_lock();
return -EOPNOTSUPP;
}
nfnl_lock(NFNL_SUBSYS_CTNETLINK);
rcu_read_lock();
#else
err = -EOPNOTSUPP;
#endif
}
return err;
}
#endif
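/* Locking dance above (illustrative summary): both the nfnetlink subsystem
 * mutex and the RCU read lock must be dropped before request_module() may
 * sleep. After the generic "nf-nat" or per-family "nf-nat-<l3num>" module
 * has been loaded, returning -EAGAIN asks the nfnetlink core to replay the
 * whole message instead of continuing with possibly stale state.
 */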
static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
return nf_ct_change_status_common(ct, ntohl(nla_get_be32(cda[CTA_STATUS])));
}
static int
ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#if IS_ENABLED(CONFIG_NF_NAT)
int ret;
if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
return 0;
ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
cda[CTA_NAT_DST]);
if (ret < 0)
return ret;
return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
cda[CTA_NAT_SRC]);
#else
if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
return 0;
return -EOPNOTSUPP;
#endif
}
static int ctnetlink_change_helper(struct nf_conn *ct,
const struct nlattr * const cda[])
{
struct nf_conntrack_helper *helper;
struct nf_conn_help *help = nfct_help(ct);
char *helpname = NULL;
struct nlattr *helpinfo = NULL;
int err;
err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
if (err < 0)
return err;
/* don't change helper of sibling connections */
if (ct->master) {
/* If we try to change the helper to the same thing twice,
* treat the second attempt as a no-op instead of returning
* an error.
*/
err = -EBUSY;
if (help) {
rcu_read_lock();
helper = rcu_dereference(help->helper);
if (helper && !strcmp(helper->name, helpname))
err = 0;
rcu_read_unlock();
}
return err;
}
if (!strcmp(helpname, "")) {
if (help && help->helper) {
/* we had a helper before ... */
nf_ct_remove_expectations(ct);
RCU_INIT_POINTER(help->helper, NULL);
}
return 0;
}
rcu_read_lock();
helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
nf_ct_protonum(ct));
if (helper == NULL) {
rcu_read_unlock();
return -EOPNOTSUPP;
}
if (help) {
if (rcu_access_pointer(help->helper) == helper) {
/* update private helper data if allowed. */
if (helper->from_nlattr)
helper->from_nlattr(helpinfo, ct);
err = 0;
} else
err = -EBUSY;
} else {
/* we cannot set a helper for an existing conntrack */
err = -EOPNOTSUPP;
}
rcu_read_unlock();
return err;
}
static int ctnetlink_change_timeout(struct nf_conn *ct,
const struct nlattr * const cda[])
{
return __nf_ct_change_timeout(ct, (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ);
}
#if defined(CONFIG_NF_CONNTRACK_MARK)
static void ctnetlink_change_mark(struct nf_conn *ct,
const struct nlattr * const cda[])
{
u32 mark, newmark, mask = 0;
if (cda[CTA_MARK_MASK])
mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
mark = ntohl(nla_get_be32(cda[CTA_MARK]));
newmark = (READ_ONCE(ct->mark) & mask) ^ mark;
if (newmark != READ_ONCE(ct->mark))
WRITE_ONCE(ct->mark, newmark);
}
#endif
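/* Mark update arithmetic (illustrative): "mask" is the complement of
 * CTA_MARK_MASK, so the bits covered by the netlink mask are cleared from
 * the old mark and then XORed with CTA_MARK. E.g. old mark 0x00ff00ff,
 * CTA_MARK 0x1, CTA_MARK_MASK 0xff:
 *
 *	newmark = (0x00ff00ff & ~0xff) ^ 0x1 = 0x00ff0001
 */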
static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
[CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
[CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
[CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
};
static int ctnetlink_change_protoinfo(struct nf_conn *ct,
const struct nlattr * const cda[])
{
const struct nlattr *attr = cda[CTA_PROTOINFO];
const struct nf_conntrack_l4proto *l4proto;
struct nlattr *tb[CTA_PROTOINFO_MAX+1];
int err = 0;
err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
protoinfo_policy, NULL);
if (err < 0)
return err;
l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->from_nlattr)
err = l4proto->from_nlattr(tb, ct);
return err;
}
static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
[CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
[CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
[CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
};
static int change_seq_adj(struct nf_ct_seqadj *seq,
const struct nlattr * const attr)
{
int err;
struct nlattr *cda[CTA_SEQADJ_MAX+1];
err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
seqadj_policy, NULL);
if (err < 0)
return err;
if (!cda[CTA_SEQADJ_CORRECTION_POS])
return -EINVAL;
seq->correction_pos =
ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
return -EINVAL;
seq->offset_before =
ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
if (!cda[CTA_SEQADJ_OFFSET_AFTER])
return -EINVAL;
seq->offset_after =
ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
return 0;
}
static int
ctnetlink_change_seq_adj(struct nf_conn *ct,
const struct nlattr * const cda[])
{
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
int ret = 0;
if (!seqadj)
return 0;
spin_lock_bh(&ct->lock);
if (cda[CTA_SEQ_ADJ_ORIG]) {
ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
cda[CTA_SEQ_ADJ_ORIG]);
if (ret < 0)
goto err;
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
}
if (cda[CTA_SEQ_ADJ_REPLY]) {
ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
cda[CTA_SEQ_ADJ_REPLY]);
if (ret < 0)
goto err;
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
}
spin_unlock_bh(&ct->lock);
return 0;
err:
spin_unlock_bh(&ct->lock);
return ret;
}
static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = {
[CTA_SYNPROXY_ISN] = { .type = NLA_U32 },
[CTA_SYNPROXY_ITS] = { .type = NLA_U32 },
[CTA_SYNPROXY_TSOFF] = { .type = NLA_U32 },
};
static int ctnetlink_change_synproxy(struct nf_conn *ct,
const struct nlattr * const cda[])
{
struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
int err;
if (!synproxy)
return 0;
err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
cda[CTA_SYNPROXY], synproxy_policy,
NULL);
if (err < 0)
return err;
if (!tb[CTA_SYNPROXY_ISN] ||
!tb[CTA_SYNPROXY_ITS] ||
!tb[CTA_SYNPROXY_TSOFF])
return -EINVAL;
synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
return 0;
}
static int
ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
size_t len = nla_len(cda[CTA_LABELS]);
const void *mask = cda[CTA_LABELS_MASK];
if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
return -EINVAL;
if (mask) {
if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
nla_len(cda[CTA_LABELS_MASK]) != len)
return -EINVAL;
mask = nla_data(cda[CTA_LABELS_MASK]);
}
len /= sizeof(u32);
return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
#else
return -EOPNOTSUPP;
#endif
}
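/*
 * Sketch (not in the original file): CTA_LABELS carries a raw bitmap and
 * its byte length must be u32-aligned, e.g. a 128-bit label set arrives
 * as len = 16 and is handed to nf_connlabels_replace() as len / 4 = 4
 * 32-bit words; a mask attribute of the same length, when present,
 * limits which label bits are replaced.
 */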
static int
ctnetlink_change_conntrack(struct nf_conn *ct,
const struct nlattr * const cda[])
{
int err;
	/* only allow NAT changes and master assignment for new conntracks */
if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
return -EOPNOTSUPP;
if (cda[CTA_HELP]) {
err = ctnetlink_change_helper(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_TIMEOUT]) {
err = ctnetlink_change_timeout(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_STATUS]) {
err = ctnetlink_change_status(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
return err;
}
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
ctnetlink_change_mark(ct, cda);
#endif
if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
err = ctnetlink_change_seq_adj(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_SYNPROXY]) {
err = ctnetlink_change_synproxy(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_LABELS]) {
err = ctnetlink_attach_labels(ct, cda);
if (err < 0)
return err;
}
return 0;
}
static struct nf_conn *
ctnetlink_create_conntrack(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nlattr * const cda[],
struct nf_conntrack_tuple *otuple,
struct nf_conntrack_tuple *rtuple,
u8 u3)
{
struct nf_conn *ct;
int err = -EINVAL;
struct nf_conntrack_helper *helper;
struct nf_conn_tstamp *tstamp;
u64 timeout;
ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
if (IS_ERR(ct))
return ERR_PTR(-ENOMEM);
if (!cda[CTA_TIMEOUT])
goto err1;
rcu_read_lock();
if (cda[CTA_HELP]) {
char *helpname = NULL;
struct nlattr *helpinfo = NULL;
err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
if (err < 0)
goto err2;
helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
nf_ct_protonum(ct));
if (helper == NULL) {
rcu_read_unlock();
#ifdef CONFIG_MODULES
if (request_module("nfct-helper-%s", helpname) < 0) {
err = -EOPNOTSUPP;
goto err1;
}
rcu_read_lock();
helper = __nf_conntrack_helper_find(helpname,
nf_ct_l3num(ct),
nf_ct_protonum(ct));
if (helper) {
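				/* Helper found after loading the module:
				 * return -EAGAIN so nfnetlink replays the
				 * whole request with the module in place.
				 */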
err = -EAGAIN;
goto err2;
}
rcu_read_unlock();
#endif
err = -EOPNOTSUPP;
goto err1;
} else {
struct nf_conn_help *help;
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help == NULL) {
err = -ENOMEM;
goto err2;
}
/* set private helper data if allowed. */
if (helper->from_nlattr)
helper->from_nlattr(helpinfo, ct);
/* disable helper auto-assignment for this entry */
ct->status |= IPS_HELPER;
RCU_INIT_POINTER(help->helper, helper);
}
}
err = ctnetlink_setup_nat(ct, cda);
if (err < 0)
goto err2;
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
nfct_seqadj_ext_add(ct);
nfct_synproxy_ext_add(ct);
/* we must add conntrack extensions before confirmation. */
ct->status |= IPS_CONFIRMED;
timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
__nf_ct_set_timeout(ct, timeout);
if (cda[CTA_STATUS]) {
err = ctnetlink_change_status(ct, cda);
if (err < 0)
goto err2;
}
if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
err = ctnetlink_change_seq_adj(ct, cda);
if (err < 0)
goto err2;
}
memset(&ct->proto, 0, sizeof(ct->proto));
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
goto err2;
}
if (cda[CTA_SYNPROXY]) {
err = ctnetlink_change_synproxy(ct, cda);
if (err < 0)
goto err2;
}
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
ctnetlink_change_mark(ct, cda);
#endif
	/* set up master conntrack: this is a confirmed expectation */
if (cda[CTA_TUPLE_MASTER]) {
struct nf_conntrack_tuple master;
struct nf_conntrack_tuple_hash *master_h;
struct nf_conn *master_ct;
err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
u3, NULL);
if (err < 0)
goto err2;
master_h = nf_conntrack_find_get(net, zone, &master);
if (master_h == NULL) {
err = -ENOENT;
goto err2;
}
master_ct = nf_ct_tuplehash_to_ctrack(master_h);
__set_bit(IPS_EXPECTED_BIT, &ct->status);
ct->master = master_ct;
}
tstamp = nf_conn_tstamp_find(ct);
if (tstamp)
tstamp->start = ktime_get_real_ns();
err = nf_conntrack_hash_check_insert(ct);
if (err < 0)
goto err3;
rcu_read_unlock();
return ct;
err3:
if (ct->master)
nf_ct_put(ct->master);
err2:
rcu_read_unlock();
err1:
nf_conntrack_free(ct);
return ERR_PTR(err);
}
static int ctnetlink_new_conntrack(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
struct nf_conntrack_tuple otuple, rtuple;
struct nf_conntrack_tuple_hash *h = NULL;
u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_zone zone;
struct nf_conn *ct;
int err;
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
if (err < 0)
return err;
if (cda[CTA_TUPLE_ORIG]) {
err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
u3, &zone);
if (err < 0)
return err;
}
if (cda[CTA_TUPLE_REPLY]) {
err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
u3, &zone);
if (err < 0)
return err;
}
if (cda[CTA_TUPLE_ORIG])
h = nf_conntrack_find_get(info->net, &zone, &otuple);
else if (cda[CTA_TUPLE_REPLY])
h = nf_conntrack_find_get(info->net, &zone, &rtuple);
if (h == NULL) {
err = -ENOENT;
if (info->nlh->nlmsg_flags & NLM_F_CREATE) {
enum ip_conntrack_events events;
if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
return -EINVAL;
if (otuple.dst.protonum != rtuple.dst.protonum)
return -EINVAL;
ct = ctnetlink_create_conntrack(info->net, &zone, cda,
&otuple, &rtuple, u3);
if (IS_ERR(ct))
return PTR_ERR(ct);
err = 0;
if (test_bit(IPS_EXPECTED_BIT, &ct->status))
events = 1 << IPCT_RELATED;
else
events = 1 << IPCT_NEW;
if (cda[CTA_LABELS] &&
ctnetlink_attach_labels(ct, cda) == 0)
events |= (1 << IPCT_LABEL);
nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
(1 << IPCT_ASSURED) |
(1 << IPCT_HELPER) |
(1 << IPCT_PROTOINFO) |
(1 << IPCT_SEQADJ) |
(1 << IPCT_MARK) |
(1 << IPCT_SYNPROXY) |
events,
ct, NETLINK_CB(skb).portid,
nlmsg_report(info->nlh));
nf_ct_put(ct);
}
return err;
}
/* implicit 'else' */
err = -EEXIST;
ct = nf_ct_tuplehash_to_ctrack(h);
if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) {
err = ctnetlink_change_conntrack(ct, cda);
if (err == 0) {
nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
(1 << IPCT_ASSURED) |
(1 << IPCT_HELPER) |
(1 << IPCT_LABEL) |
(1 << IPCT_PROTOINFO) |
(1 << IPCT_SEQADJ) |
(1 << IPCT_MARK) |
(1 << IPCT_SYNPROXY),
ct, NETLINK_CB(skb).portid,
nlmsg_report(info->nlh));
}
}
nf_ct_put(ct);
return err;
}
static int
ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
__u16 cpu, const struct ip_conntrack_stat *st)
{
struct nlmsghdr *nlh;
unsigned int flags = portid ? NLM_F_MULTI : 0, event;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
IPCTNL_MSG_CT_GET_STATS_CPU);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
NFNETLINK_V0, htons(cpu));
if (!nlh)
goto nlmsg_failure;
if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
htonl(st->insert_failed)) ||
nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
htonl(st->search_restart)) ||
nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE,
htonl(st->clash_resolve)) ||
nla_put_be32(skb, CTA_STATS_CHAIN_TOOLONG,
htonl(st->chaintoolong)))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
nla_put_failure:
nlmsg_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int
ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int cpu;
struct net *net = sock_net(skb->sk);
if (cb->args[0] == nr_cpu_ids)
return 0;
for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
const struct ip_conntrack_stat *st;
if (!cpu_possible(cpu))
continue;
st = per_cpu_ptr(net->ct.stat, cpu);
if (ctnetlink_ct_stat_cpu_fill_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
cpu, st) < 0)
break;
}
cb->args[0] = cpu;
return skb->len;
}
static int ctnetlink_stat_ct_cpu(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ctnetlink_ct_stat_cpu_dump,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
return 0;
}
static int
ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct net *net)
{
unsigned int flags = portid ? NLM_F_MULTI : 0, event;
unsigned int nr_conntracks;
struct nlmsghdr *nlh;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
nr_conntracks = nf_conntrack_count(net);
if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
goto nla_put_failure;
if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
nla_put_failure:
nlmsg_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int ctnetlink_stat_ct(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const cda[])
{
struct sk_buff *skb2;
int err;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL)
return -ENOMEM;
err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type),
sock_net(skb->sk));
if (err <= 0) {
kfree_skb(skb2);
return -ENOMEM;
}
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
[CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
[CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
[CTA_EXPECT_MASK] = { .type = NLA_NESTED },
[CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
[CTA_EXPECT_ID] = { .type = NLA_U32 },
[CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN - 1 },
[CTA_EXPECT_ZONE] = { .type = NLA_U16 },
[CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
[CTA_EXPECT_CLASS] = { .type = NLA_U32 },
[CTA_EXPECT_NAT] = { .type = NLA_NESTED },
[CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
};
static struct nf_conntrack_expect *
ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
struct nf_conntrack_helper *helper,
struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *mask);
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
static size_t
ctnetlink_glue_build_size(const struct nf_conn *ct)
{
return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
+ 3 * nla_total_size(0) /* CTA_TUPLE_IP */
+ 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
+ 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ nla_total_size(0) /* CTA_PROTOINFO */
+ nla_total_size(0) /* CTA_HELP */
+ nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
+ ctnetlink_secctx_size(ct)
+ ctnetlink_acct_size(ct)
+ ctnetlink_timestamp_size(ct)
#if IS_ENABLED(CONFIG_NF_NAT)
+ 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
+ 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
+ nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
+ nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
+ ctnetlink_proto_size(ct)
;
}
static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
{
const struct nf_conntrack_zone *zone;
struct nlattr *nest_parms;
zone = nf_ct_zone(ct);
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
NF_CT_ZONE_DIR_ORIG) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
NF_CT_ZONE_DIR_REPL) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
NF_CT_DEFAULT_ZONE_DIR) < 0)
goto nla_put_failure;
if (ctnetlink_dump_id(skb, ct) < 0)
goto nla_put_failure;
if (ctnetlink_dump_status(skb, ct) < 0)
goto nla_put_failure;
if (ctnetlink_dump_timeout(skb, ct, false) < 0)
goto nla_put_failure;
if (ctnetlink_dump_protoinfo(skb, ct, false) < 0)
goto nla_put_failure;
if (ctnetlink_dump_acct(skb, ct, IPCTNL_MSG_CT_GET) < 0 ||
ctnetlink_dump_timestamp(skb, ct) < 0)
goto nla_put_failure;
if (ctnetlink_dump_helpinfo(skb, ct) < 0)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
goto nla_put_failure;
#endif
if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
goto nla_put_failure;
if ((ct->status & IPS_SEQ_ADJUST) &&
ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
goto nla_put_failure;
if (ctnetlink_dump_ct_synproxy(skb, ct) < 0)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK
if (ctnetlink_dump_mark(skb, ct, true) < 0)
goto nla_put_failure;
#endif
if (ctnetlink_dump_labels(skb, ct) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static int
ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
u_int16_t ct_attr, u_int16_t ct_info_attr)
{
struct nlattr *nest_parms;
nest_parms = nla_nest_start(skb, ct_attr);
if (!nest_parms)
goto nla_put_failure;
if (__ctnetlink_glue_build(skb, ct) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static int
ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
unsigned long d = ct->status ^ status;
if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
/* SEEN_REPLY bit can only be set */
return -EBUSY;
if (d & IPS_ASSURED && !(status & IPS_ASSURED))
/* ASSURED bit can only be set */
return -EBUSY;
/* This check is less strict than ctnetlink_change_status()
* because callers often flip IPS_EXPECTED bits when sending
* an NFQA_CT attribute to the kernel. So ignore the
* unchangeable bits but do not error out. Also user programs
* are allowed to clear the bits that they are allowed to change.
*/
__nf_ct_change_status(ct, status, ~status);
return 0;
}
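/*
 * Worked example (from the code above, values made up): if ct->status
 * has IPS_ASSURED set and the NFQA_CT status attribute clears it, then
 *
 *	d = ct->status ^ status		-> IPS_ASSURED set in d
 *	status & IPS_ASSURED		-> 0
 *
 * and the update is rejected with -EBUSY: SEEN_REPLY and ASSURED may be
 * set from userspace but never cleared again.
 */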
static int
ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
{
int err;
if (cda[CTA_TIMEOUT]) {
err = ctnetlink_change_timeout(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_STATUS]) {
err = ctnetlink_update_status(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_HELP]) {
err = ctnetlink_change_helper(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_LABELS]) {
err = ctnetlink_attach_labels(ct, cda);
if (err < 0)
return err;
}
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK]) {
ctnetlink_change_mark(ct, cda);
}
#endif
return 0;
}
static int
ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
{
struct nlattr *cda[CTA_MAX+1];
int ret;
ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy,
NULL);
if (ret < 0)
return ret;
return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
}
static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
const struct nf_conn *ct,
struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *mask)
{
int err;
err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
nf_ct_l3num(ct), NULL);
if (err < 0)
return err;
return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
nf_ct_l3num(ct), NULL);
}
static int
ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
u32 portid, u32 report)
{
struct nlattr *cda[CTA_EXPECT_MAX+1];
struct nf_conntrack_tuple tuple, mask;
struct nf_conntrack_helper *helper = NULL;
struct nf_conntrack_expect *exp;
int err;
err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr,
exp_nla_policy, NULL);
if (err < 0)
return err;
err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
ct, &tuple, &mask);
if (err < 0)
return err;
if (cda[CTA_EXPECT_HELP_NAME]) {
const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
nf_ct_protonum(ct));
if (helper == NULL)
return -EOPNOTSUPP;
}
exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
helper, &tuple, &mask);
if (IS_ERR(exp))
return PTR_ERR(exp);
err = nf_ct_expect_related_report(exp, portid, report, 0);
nf_ct_expect_put(exp);
return err;
}
static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int diff)
{
if (!(ct->status & IPS_NAT_MASK))
return;
nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
}
static const struct nfnl_ct_hook ctnetlink_glue_hook = {
.build_size = ctnetlink_glue_build_size,
.build = ctnetlink_glue_build,
.parse = ctnetlink_glue_parse,
.attach_expect = ctnetlink_glue_attach_expect,
.seq_adjust = ctnetlink_glue_seqadj,
};
#endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
/***********************************************************************
* EXPECT
***********************************************************************/
static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
u32 type)
{
struct nlattr *nest_parms;
nest_parms = nla_nest_start(skb, type);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, tuple) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
return -1;
}
static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple_mask *mask)
{
const struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple m;
struct nlattr *nest_parms;
int ret;
memset(&m, 0xFF, sizeof(m));
memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
m.src.u.all = mask->src.u.all;
m.src.l3num = tuple->src.l3num;
m.dst.protonum = tuple->dst.protonum;
nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
if (!nest_parms)
goto nla_put_failure;
rcu_read_lock();
ret = ctnetlink_dump_tuples_ip(skb, &m);
if (ret >= 0) {
l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
}
rcu_read_unlock();
if (unlikely(ret < 0))
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
return -1;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static const union nf_inet_addr any_addr;
#endif
static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
{
static siphash_aligned_key_t exp_id_seed;
unsigned long a, b, c, d;
net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
a = (unsigned long)exp;
b = (unsigned long)exp->helper;
c = (unsigned long)exp->master;
d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
#ifdef CONFIG_64BIT
return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
#else
return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
#endif
}
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
const struct nf_conntrack_expect *exp)
{
struct nf_conn *master = exp->master;
long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
struct nf_conn_help *help;
#if IS_ENABLED(CONFIG_NF_NAT)
struct nlattr *nest_parms;
struct nf_conntrack_tuple nat_tuple = {};
#endif
struct nf_ct_helper_expectfn *expfn;
if (timeout < 0)
timeout = 0;
if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
goto nla_put_failure;
if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
goto nla_put_failure;
if (ctnetlink_exp_dump_tuple(skb,
&master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
CTA_EXPECT_MASTER) < 0)
goto nla_put_failure;
#if IS_ENABLED(CONFIG_NF_NAT)
if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
exp->saved_proto.all) {
nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
goto nla_put_failure;
nat_tuple.src.l3num = nf_ct_l3num(master);
nat_tuple.src.u3 = exp->saved_addr;
nat_tuple.dst.protonum = nf_ct_protonum(master);
nat_tuple.src.u = exp->saved_proto;
if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
CTA_EXPECT_NAT_TUPLE) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
}
#endif
if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
goto nla_put_failure;
help = nfct_help(master);
if (help) {
struct nf_conntrack_helper *helper;
helper = rcu_dereference(help->helper);
if (helper &&
nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
goto nla_put_failure;
}
expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
if (expfn != NULL &&
nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int
ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
int event, const struct nf_conntrack_expect *exp)
{
struct nlmsghdr *nlh;
unsigned int flags = portid ? NLM_F_MULTI : 0;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags,
exp->tuple.src.l3num, NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
if (ctnetlink_exp_dump_expect(skb, exp) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int
ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item)
{
struct nf_conntrack_expect *exp = item->exp;
struct net *net = nf_ct_exp_net(exp);
struct nlmsghdr *nlh;
struct sk_buff *skb;
unsigned int type, group;
int flags = 0;
if (events & (1 << IPEXP_DESTROY)) {
type = IPCTNL_MSG_EXP_DELETE;
group = NFNLGRP_CONNTRACK_EXP_DESTROY;
} else if (events & (1 << IPEXP_NEW)) {
type = IPCTNL_MSG_EXP_NEW;
flags = NLM_F_CREATE|NLM_F_EXCL;
group = NFNLGRP_CONNTRACK_EXP_NEW;
} else
return 0;
if (!item->report && !nfnetlink_has_listeners(net, group))
return 0;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (skb == NULL)
goto errout;
type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type);
nlh = nfnl_msg_put(skb, item->portid, 0, type, flags,
exp->tuple.src.l3num, NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
if (ctnetlink_exp_dump_expect(skb, exp) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
nlmsg_failure:
kfree_skb(skb);
errout:
nfnetlink_set_err(net, 0, 0, -ENOBUFS);
return 0;
}
#endif
static int ctnetlink_exp_done(struct netlink_callback *cb)
{
if (cb->args[1])
nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
return 0;
}
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
rcu_read_lock();
last = (struct nf_conntrack_expect *)cb->args[1];
for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
hnode) {
if (l3proto && exp->tuple.src.l3num != l3proto)
continue;
if (!net_eq(nf_ct_net(exp->master), net))
continue;
if (cb->args[1]) {
if (exp != last)
continue;
cb->args[1] = 0;
}
if (ctnetlink_exp_fill_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
IPCTNL_MSG_EXP_NEW,
exp) < 0) {
if (!refcount_inc_not_zero(&exp->use))
continue;
cb->args[1] = (unsigned long)exp;
goto out;
}
}
if (cb->args[1]) {
cb->args[1] = 0;
goto restart;
}
}
out:
rcu_read_unlock();
if (last)
nf_ct_expect_put(last);
return skb->len;
}
static int
ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct nf_conn *ct = cb->data;
struct nf_conn_help *help = nfct_help(ct);
u_int8_t l3proto = nfmsg->nfgen_family;
if (cb->args[0])
return 0;
rcu_read_lock();
last = (struct nf_conntrack_expect *)cb->args[1];
restart:
hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
if (l3proto && exp->tuple.src.l3num != l3proto)
continue;
if (cb->args[1]) {
if (exp != last)
continue;
cb->args[1] = 0;
}
if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
IPCTNL_MSG_EXP_NEW,
exp) < 0) {
if (!refcount_inc_not_zero(&exp->use))
continue;
cb->args[1] = (unsigned long)exp;
goto out;
}
}
if (cb->args[1]) {
cb->args[1] = 0;
goto restart;
}
cb->args[0] = 1;
out:
rcu_read_unlock();
if (last)
nf_ct_expect_put(last);
return skb->len;
}
static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[],
struct netlink_ext_ack *extack)
{
int err;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conntrack_zone zone;
struct netlink_dump_control c = {
.dump = ctnetlink_exp_ct_dump_table,
.done = ctnetlink_exp_done,
};
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
u3, NULL);
if (err < 0)
return err;
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
if (err < 0)
return err;
h = nf_conntrack_find_get(net, &zone, &tuple);
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
	/* No expectations are linked to this conntrack entry. */
if (!nfct_help(ct)) {
nf_ct_put(ct);
return 0;
}
c.data = ct;
err = netlink_dump_start(ctnl, skb, nlh, &c);
nf_ct_put(ct);
return err;
}
static int ctnetlink_get_expect(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_expect *exp;
struct nf_conntrack_zone zone;
struct sk_buff *skb2;
int err;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
if (cda[CTA_EXPECT_MASTER])
return ctnetlink_dump_exp_ct(info->net, info->sk, skb,
info->nlh, cda,
info->extack);
else {
struct netlink_dump_control c = {
.dump = ctnetlink_exp_dump_table,
.done = ctnetlink_exp_done,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
}
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
if (err < 0)
return err;
if (cda[CTA_EXPECT_TUPLE])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
u3, NULL);
else if (cda[CTA_EXPECT_MASTER])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
u3, NULL);
else
return -EINVAL;
if (err < 0)
return err;
exp = nf_ct_expect_find_get(info->net, &zone, &tuple);
if (!exp)
return -ENOENT;
if (cda[CTA_EXPECT_ID]) {
__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
if (id != nf_expect_get_id(exp)) {
nf_ct_expect_put(exp);
return -ENOENT;
}
}
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb2) {
nf_ct_expect_put(exp);
return -ENOMEM;
}
rcu_read_lock();
err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW,
exp);
rcu_read_unlock();
nf_ct_expect_put(exp);
if (err <= 0) {
kfree_skb(skb2);
return -ENOMEM;
}
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
{
struct nf_conntrack_helper *helper;
const struct nf_conn_help *m_help;
const char *name = data;
m_help = nfct_help(exp->master);
helper = rcu_dereference(m_help->helper);
if (!helper)
return false;
return strcmp(helper->name, name) == 0;
}
static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
{
return true;
}
static int ctnetlink_del_expect(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
int err;
if (cda[CTA_EXPECT_TUPLE]) {
/* delete a single expect by tuple */
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
if (err < 0)
return err;
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
u3, NULL);
if (err < 0)
return err;
/* bump usage count to 2 */
exp = nf_ct_expect_find_get(info->net, &zone, &tuple);
if (!exp)
return -ENOENT;
if (cda[CTA_EXPECT_ID]) {
__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
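			/* Note: unlike ctnetlink_get_expect() above, this
			 * still compares the raw object address instead of
			 * nf_expect_get_id(), so ids obtained from a dump
			 * will not match here.
			 */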
if (ntohl(id) != (u32)(unsigned long)exp) {
nf_ct_expect_put(exp);
return -ENOENT;
}
}
/* after list removal, usage count == 1 */
spin_lock_bh(&nf_conntrack_expect_lock);
if (del_timer(&exp->timeout)) {
nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
nlmsg_report(info->nlh));
nf_ct_expect_put(exp);
}
spin_unlock_bh(&nf_conntrack_expect_lock);
/* have to put what we 'get' above.
* after this line usage count == 0 */
nf_ct_expect_put(exp);
} else if (cda[CTA_EXPECT_HELP_NAME]) {
char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
nf_ct_expect_iterate_net(info->net, expect_iter_name, name,
NETLINK_CB(skb).portid,
nlmsg_report(info->nlh));
} else {
		/* This basically means we have to flush everything */
nf_ct_expect_iterate_net(info->net, expect_iter_all, NULL,
NETLINK_CB(skb).portid,
nlmsg_report(info->nlh));
}
return 0;
}
static int
ctnetlink_change_expect(struct nf_conntrack_expect *x,
const struct nlattr * const cda[])
{
if (cda[CTA_EXPECT_TIMEOUT]) {
if (!del_timer(&x->timeout))
return -ETIME;
x->timeout.expires = jiffies +
ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
add_timer(&x->timeout);
}
return 0;
}
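/*
 * Sketch (not in the original file): CTA_EXPECT_TIMEOUT is in seconds,
 * e.g. a value of 30 re-arms the expectation timer to fire at
 * jiffies + 30 * HZ. If del_timer() returns 0 the timer already fired
 * and the expectation is being torn down, hence -ETIME.
 */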
#if IS_ENABLED(CONFIG_NF_NAT)
static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
[CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
[CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
};
#endif
static int
ctnetlink_parse_expect_nat(const struct nlattr *attr,
struct nf_conntrack_expect *exp,
u_int8_t u3)
{
#if IS_ENABLED(CONFIG_NF_NAT)
struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
struct nf_conntrack_tuple nat_tuple = {};
int err;
err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr,
exp_nat_nla_policy, NULL);
if (err < 0)
return err;
if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
return -EINVAL;
err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
&nat_tuple, CTA_EXPECT_NAT_TUPLE,
u3, NULL);
if (err < 0)
return err;
exp->saved_addr = nat_tuple.src.u3;
exp->saved_proto = nat_tuple.src.u;
exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
return 0;
#else
return -EOPNOTSUPP;
#endif
}
static struct nf_conntrack_expect *
ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
struct nf_conntrack_helper *helper,
struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *mask)
{
u_int32_t class = 0;
struct nf_conntrack_expect *exp;
struct nf_conn_help *help;
int err;
help = nfct_help(ct);
if (!help)
return ERR_PTR(-EOPNOTSUPP);
if (cda[CTA_EXPECT_CLASS] && helper) {
class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
if (class > helper->expect_class_max)
return ERR_PTR(-EINVAL);
}
exp = nf_ct_expect_alloc(ct);
if (!exp)
return ERR_PTR(-ENOMEM);
if (cda[CTA_EXPECT_FLAGS]) {
exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
exp->flags &= ~NF_CT_EXPECT_USERSPACE;
} else {
exp->flags = 0;
}
if (cda[CTA_EXPECT_FN]) {
const char *name = nla_data(cda[CTA_EXPECT_FN]);
struct nf_ct_helper_expectfn *expfn;
expfn = nf_ct_helper_expectfn_find_by_name(name);
if (expfn == NULL) {
err = -EINVAL;
goto err_out;
}
exp->expectfn = expfn->expectfn;
} else
exp->expectfn = NULL;
exp->class = class;
exp->master = ct;
exp->helper = helper;
exp->tuple = *tuple;
exp->mask.src.u3 = mask->src.u3;
exp->mask.src.u.all = mask->src.u.all;
if (cda[CTA_EXPECT_NAT]) {
err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
exp, nf_ct_l3num(ct));
if (err < 0)
goto err_out;
}
return exp;
err_out:
nf_ct_expect_put(exp);
return ERR_PTR(err);
}
static int
ctnetlink_create_expect(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nlattr * const cda[],
u_int8_t u3, u32 portid, int report)
{
struct nf_conntrack_tuple tuple, mask, master_tuple;
struct nf_conntrack_tuple_hash *h = NULL;
struct nf_conntrack_helper *helper = NULL;
struct nf_conntrack_expect *exp;
struct nf_conn *ct;
int err;
/* caller guarantees that those three CTA_EXPECT_* exist */
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
u3, NULL);
if (err < 0)
return err;
err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
u3, NULL);
if (err < 0)
return err;
err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
u3, NULL);
if (err < 0)
return err;
/* Look for master conntrack of this expectation */
h = nf_conntrack_find_get(net, zone, &master_tuple);
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
rcu_read_lock();
if (cda[CTA_EXPECT_HELP_NAME]) {
const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
helper = __nf_conntrack_helper_find(helpname, u3,
nf_ct_protonum(ct));
if (helper == NULL) {
rcu_read_unlock();
#ifdef CONFIG_MODULES
if (request_module("nfct-helper-%s", helpname) < 0) {
err = -EOPNOTSUPP;
goto err_ct;
}
rcu_read_lock();
helper = __nf_conntrack_helper_find(helpname, u3,
nf_ct_protonum(ct));
if (helper) {
err = -EAGAIN;
goto err_rcu;
}
rcu_read_unlock();
#endif
err = -EOPNOTSUPP;
goto err_ct;
}
}
exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
if (IS_ERR(exp)) {
err = PTR_ERR(exp);
goto err_rcu;
}
err = nf_ct_expect_related_report(exp, portid, report, 0);
nf_ct_expect_put(exp);
err_rcu:
rcu_read_unlock();
err_ct:
nf_ct_put(ct);
return err;
}
static int ctnetlink_new_expect(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_expect *exp;
struct nf_conntrack_zone zone;
int err;
if (!cda[CTA_EXPECT_TUPLE]
|| !cda[CTA_EXPECT_MASK]
|| !cda[CTA_EXPECT_MASTER])
return -EINVAL;
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
if (err < 0)
return err;
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
u3, NULL);
if (err < 0)
return err;
spin_lock_bh(&nf_conntrack_expect_lock);
exp = __nf_ct_expect_find(info->net, &zone, &tuple);
if (!exp) {
spin_unlock_bh(&nf_conntrack_expect_lock);
err = -ENOENT;
if (info->nlh->nlmsg_flags & NLM_F_CREATE) {
err = ctnetlink_create_expect(info->net, &zone, cda, u3,
NETLINK_CB(skb).portid,
nlmsg_report(info->nlh));
}
return err;
}
err = -EEXIST;
if (!(info->nlh->nlmsg_flags & NLM_F_EXCL))
err = ctnetlink_change_expect(exp, cda);
spin_unlock_bh(&nf_conntrack_expect_lock);
return err;
}
static int
ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
const struct ip_conntrack_stat *st)
{
struct nlmsghdr *nlh;
unsigned int flags = portid ? NLM_F_MULTI : 0, event;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
IPCTNL_MSG_EXP_GET_STATS_CPU);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
NFNETLINK_V0, htons(cpu));
if (!nlh)
goto nlmsg_failure;
if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
nla_put_failure:
nlmsg_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int
ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int cpu;
struct net *net = sock_net(skb->sk);
if (cb->args[0] == nr_cpu_ids)
return 0;
for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
const struct ip_conntrack_stat *st;
if (!cpu_possible(cpu))
continue;
st = per_cpu_ptr(net->ct.stat, cpu);
if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
cpu, st) < 0)
break;
}
cb->args[0] = cpu;
return skb->len;
}
static int ctnetlink_stat_exp_cpu(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ctnetlink_exp_stat_cpu_dump,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
return 0;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static struct nf_ct_event_notifier ctnl_notifier = {
.ct_event = ctnetlink_conntrack_event,
.exp_event = ctnetlink_expect_event,
};
#endif
static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
[IPCTNL_MSG_CT_NEW] = {
.call = ctnetlink_new_conntrack,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_MAX,
.policy = ct_nla_policy
},
[IPCTNL_MSG_CT_GET] = {
.call = ctnetlink_get_conntrack,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_MAX,
.policy = ct_nla_policy
},
[IPCTNL_MSG_CT_DELETE] = {
.call = ctnetlink_del_conntrack,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_MAX,
.policy = ct_nla_policy
},
[IPCTNL_MSG_CT_GET_CTRZERO] = {
.call = ctnetlink_get_conntrack,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_MAX,
.policy = ct_nla_policy
},
[IPCTNL_MSG_CT_GET_STATS_CPU] = {
.call = ctnetlink_stat_ct_cpu,
.type = NFNL_CB_MUTEX,
},
[IPCTNL_MSG_CT_GET_STATS] = {
.call = ctnetlink_stat_ct,
.type = NFNL_CB_MUTEX,
},
[IPCTNL_MSG_CT_GET_DYING] = {
.call = ctnetlink_get_ct_dying,
.type = NFNL_CB_MUTEX,
},
[IPCTNL_MSG_CT_GET_UNCONFIRMED] = {
.call = ctnetlink_get_ct_unconfirmed,
.type = NFNL_CB_MUTEX,
},
};
static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
[IPCTNL_MSG_EXP_GET] = {
.call = ctnetlink_get_expect,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_EXPECT_MAX,
.policy = exp_nla_policy
},
[IPCTNL_MSG_EXP_NEW] = {
.call = ctnetlink_new_expect,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_EXPECT_MAX,
.policy = exp_nla_policy
},
[IPCTNL_MSG_EXP_DELETE] = {
.call = ctnetlink_del_expect,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_EXPECT_MAX,
.policy = exp_nla_policy
},
[IPCTNL_MSG_EXP_GET_STATS_CPU] = {
.call = ctnetlink_stat_exp_cpu,
.type = NFNL_CB_MUTEX,
},
};
static const struct nfnetlink_subsystem ctnl_subsys = {
.name = "conntrack",
.subsys_id = NFNL_SUBSYS_CTNETLINK,
.cb_count = IPCTNL_MSG_MAX,
.cb = ctnl_cb,
};
static const struct nfnetlink_subsystem ctnl_exp_subsys = {
.name = "conntrack_expect",
.subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
.cb_count = IPCTNL_MSG_EXP_MAX,
.cb = ctnl_exp_cb,
};
MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
static int __net_init ctnetlink_net_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
nf_conntrack_register_notifier(net, &ctnl_notifier);
#endif
return 0;
}
static void ctnetlink_net_pre_exit(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
nf_conntrack_unregister_notifier(net);
#endif
}
static struct pernet_operations ctnetlink_net_ops = {
.init = ctnetlink_net_init,
.pre_exit = ctnetlink_net_pre_exit,
};
static int __init ctnetlink_init(void)
{
int ret;
NL_ASSERT_DUMP_CTX_FITS(struct ctnetlink_list_dump_ctx);
ret = nfnetlink_subsys_register(&ctnl_subsys);
if (ret < 0) {
pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
goto err_out;
}
ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
if (ret < 0) {
pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
goto err_unreg_subsys;
}
ret = register_pernet_subsys(&ctnetlink_net_ops);
if (ret < 0) {
pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
	/* set up interaction between nf_queue and nf_conntrack_netlink. */
RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
#endif
return 0;
err_unreg_exp_subsys:
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
err_unreg_subsys:
nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
return ret;
}
static void __exit ctnetlink_exit(void)
{
unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
RCU_INIT_POINTER(nfnl_ct_hook, NULL);
#endif
synchronize_rcu();
}
module_init(ctnetlink_init);
module_exit(ctnetlink_exit);
| linux-master | net/netfilter/nf_conntrack_netlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <net/netns/generic.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nf_synproxy.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_synproxy.h>
unsigned int synproxy_net_id;
EXPORT_SYMBOL_GPL(synproxy_net_id);
bool
synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
const struct tcphdr *th, struct synproxy_options *opts)
{
int length = (th->doff * 4) - sizeof(*th);
u8 buf[40], *ptr;
if (unlikely(length < 0))
return false;
ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
if (ptr == NULL)
return false;
opts->options = 0;
while (length > 0) {
int opcode = *ptr++;
int opsize;
switch (opcode) {
case TCPOPT_EOL:
return true;
case TCPOPT_NOP:
length--;
continue;
default:
if (length < 2)
return true;
opsize = *ptr++;
if (opsize < 2)
return true;
if (opsize > length)
return true;
switch (opcode) {
case TCPOPT_MSS:
if (opsize == TCPOLEN_MSS) {
opts->mss_option = get_unaligned_be16(ptr);
opts->options |= NF_SYNPROXY_OPT_MSS;
}
break;
case TCPOPT_WINDOW:
if (opsize == TCPOLEN_WINDOW) {
opts->wscale = *ptr;
if (opts->wscale > TCP_MAX_WSCALE)
opts->wscale = TCP_MAX_WSCALE;
opts->options |= NF_SYNPROXY_OPT_WSCALE;
}
break;
case TCPOPT_TIMESTAMP:
if (opsize == TCPOLEN_TIMESTAMP) {
opts->tsval = get_unaligned_be32(ptr);
opts->tsecr = get_unaligned_be32(ptr + 4);
opts->options |= NF_SYNPROXY_OPT_TIMESTAMP;
}
break;
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM)
opts->options |= NF_SYNPROXY_OPT_SACK_PERM;
break;
}
ptr += opsize - 2;
length -= opsize;
}
}
return true;
}
EXPORT_SYMBOL_GPL(synproxy_parse_options);
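/*
 * Worked example (illustrative, standard TCP option encoding): the parser
 * above walks kind/length pairs, so an option block such as
 *
 *	02 04 05 b4	MSS, len 4, value 0x05b4 (= 1460)
 *	01		NOP (one byte, no length field)
 *	03 03 07	window scale, len 3, shift 7
 *	04 02		SACK permitted, len 2
 *	08 0a ...	timestamps, len 10, TSval/TSecr follow
 *
 * would set NF_SYNPROXY_OPT_MSS, _WSCALE, _SACK_PERM and _TIMESTAMP with
 * the corresponding values in *opts.
 */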
static unsigned int
synproxy_options_size(const struct synproxy_options *opts)
{
unsigned int size = 0;
if (opts->options & NF_SYNPROXY_OPT_MSS)
size += TCPOLEN_MSS_ALIGNED;
if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP)
size += TCPOLEN_TSTAMP_ALIGNED;
else if (opts->options & NF_SYNPROXY_OPT_SACK_PERM)
size += TCPOLEN_SACKPERM_ALIGNED;
if (opts->options & NF_SYNPROXY_OPT_WSCALE)
size += TCPOLEN_WSCALE_ALIGNED;
return size;
}
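/*
 * Worked example (using the aligned kernel constants, 4/12/4/4 bytes):
 * with MSS, timestamps, SACK-permitted and window scale all requested,
 * SACK_PERM is folded into the timestamp word, so the options take
 * 4 (MSS) + 12 (TS incl. SACK_PERM) + 4 (WSCALE) = 20 bytes, keeping the
 * TCP header length a multiple of 4 as required by th->doff.
 */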
static void
synproxy_build_options(struct tcphdr *th, const struct synproxy_options *opts)
{
__be32 *ptr = (__be32 *)(th + 1);
u8 options = opts->options;
if (options & NF_SYNPROXY_OPT_MSS)
*ptr++ = htonl((TCPOPT_MSS << 24) |
(TCPOLEN_MSS << 16) |
opts->mss_option);
if (options & NF_SYNPROXY_OPT_TIMESTAMP) {
if (options & NF_SYNPROXY_OPT_SACK_PERM)
*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
(TCPOLEN_SACK_PERM << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP);
else
*ptr++ = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP);
*ptr++ = htonl(opts->tsval);
*ptr++ = htonl(opts->tsecr);
} else if (options & NF_SYNPROXY_OPT_SACK_PERM)
*ptr++ = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_SACK_PERM << 8) |
TCPOLEN_SACK_PERM);
if (options & NF_SYNPROXY_OPT_WSCALE)
*ptr++ = htonl((TCPOPT_NOP << 24) |
(TCPOPT_WINDOW << 16) |
(TCPOLEN_WINDOW << 8) |
opts->wscale);
}
void synproxy_init_timestamp_cookie(const struct nf_synproxy_info *info,
struct synproxy_options *opts)
{
opts->tsecr = opts->tsval;
opts->tsval = tcp_time_stamp_raw() & ~0x3f;
if (opts->options & NF_SYNPROXY_OPT_WSCALE) {
opts->tsval |= opts->wscale;
opts->wscale = info->wscale;
} else
opts->tsval |= 0xf;
if (opts->options & NF_SYNPROXY_OPT_SACK_PERM)
opts->tsval |= 1 << 4;
if (opts->options & NF_SYNPROXY_OPT_ECN)
opts->tsval |= 1 << 5;
}
EXPORT_SYMBOL_GPL(synproxy_init_timestamp_cookie);
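/*
 * Worked example (from the bit layout above, values made up): with a
 * client window scale of 7, SACK permitted and no ECN, the low six bits
 * of the cookie timestamp become
 *
 *	7 | (1 << 4) = 0x17
 *
 * synproxy_check_timestamp_cookie() below reverses this: wscale is the
 * low nibble (0xf meaning "no window scaling"), bit 4 is SACK_PERM and
 * bit 5 is ECN.
 */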
static void
synproxy_check_timestamp_cookie(struct synproxy_options *opts)
{
opts->wscale = opts->tsecr & 0xf;
if (opts->wscale != 0xf)
opts->options |= NF_SYNPROXY_OPT_WSCALE;
opts->options |= opts->tsecr & (1 << 4) ? NF_SYNPROXY_OPT_SACK_PERM : 0;
opts->options |= opts->tsecr & (1 << 5) ? NF_SYNPROXY_OPT_ECN : 0;
}
static unsigned int
synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff,
struct tcphdr *th, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct nf_conn_synproxy *synproxy)
{
unsigned int optoff, optend;
__be32 *ptr, old;
if (synproxy->tsoff == 0)
return 1;
optoff = protoff + sizeof(struct tcphdr);
optend = protoff + th->doff * 4;
if (skb_ensure_writable(skb, optend))
return 0;
while (optoff < optend) {
unsigned char *op = skb->data + optoff;
switch (op[0]) {
case TCPOPT_EOL:
return 1;
case TCPOPT_NOP:
optoff++;
continue;
default:
if (optoff + 1 == optend ||
optoff + op[1] > optend ||
op[1] < 2)
return 0;
if (op[0] == TCPOPT_TIMESTAMP &&
op[1] == TCPOLEN_TIMESTAMP) {
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
ptr = (__be32 *)&op[2];
old = *ptr;
*ptr = htonl(ntohl(*ptr) -
synproxy->tsoff);
} else {
ptr = (__be32 *)&op[6];
old = *ptr;
*ptr = htonl(ntohl(*ptr) +
synproxy->tsoff);
}
inet_proto_csum_replace4(&th->check, skb,
old, *ptr, false);
return 1;
}
optoff += op[1];
}
}
return 1;
}
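/*
 * Sketch (restating the code above): the TCP timestamp option is laid
 * out as kind (op[0] = 8), length (op[1] = 10), TSval (op[2..5]) and
 * TSecr (op[6..9]). Packets in the reply direction get TSval lowered by
 * tsoff, packets in the original direction get TSecr raised by it, and
 * inet_proto_csum_replace4() patches the TCP checksum incrementally so
 * the packet needs no full re-checksum.
 */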
#ifdef CONFIG_PROC_FS
static void *synproxy_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq));
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(snet->stats, cpu);
}
return NULL;
}
static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq));
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(snet->stats, cpu);
}
(*pos)++;
return NULL;
}
static void synproxy_cpu_seq_stop(struct seq_file *seq, void *v)
{
return;
}
static int synproxy_cpu_seq_show(struct seq_file *seq, void *v)
{
struct synproxy_stats *stats = v;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "entries\t\tsyn_received\t"
"cookie_invalid\tcookie_valid\t"
"cookie_retrans\tconn_reopened\n");
return 0;
}
seq_printf(seq, "%08x\t%08x\t%08x\t%08x\t%08x\t%08x\n", 0,
stats->syn_received,
stats->cookie_invalid,
stats->cookie_valid,
stats->cookie_retrans,
stats->conn_reopened);
return 0;
}
static const struct seq_operations synproxy_cpu_seq_ops = {
.start = synproxy_cpu_seq_start,
.next = synproxy_cpu_seq_next,
.stop = synproxy_cpu_seq_stop,
.show = synproxy_cpu_seq_show,
};
static int __net_init synproxy_proc_init(struct net *net)
{
if (!proc_create_net("synproxy", 0444, net->proc_net_stat,
&synproxy_cpu_seq_ops, sizeof(struct seq_net_private)))
return -ENOMEM;
return 0;
}
static void __net_exit synproxy_proc_exit(struct net *net)
{
remove_proc_entry("synproxy", net->proc_net_stat);
}
#else
static int __net_init synproxy_proc_init(struct net *net)
{
return 0;
}
static void __net_exit synproxy_proc_exit(struct net *net)
{
return;
}
#endif /* CONFIG_PROC_FS */
static int __net_init synproxy_net_init(struct net *net)
{
struct synproxy_net *snet = synproxy_pernet(net);
struct nf_conn *ct;
int err = -ENOMEM;
ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
if (!ct)
goto err1;
if (!nfct_seqadj_ext_add(ct))
goto err2;
if (!nfct_synproxy_ext_add(ct))
goto err2;
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
snet->tmpl = ct;
snet->stats = alloc_percpu(struct synproxy_stats);
if (snet->stats == NULL)
goto err2;
err = synproxy_proc_init(net);
if (err < 0)
goto err3;
return 0;
err3:
free_percpu(snet->stats);
err2:
nf_ct_tmpl_free(ct);
err1:
return err;
}
static void __net_exit synproxy_net_exit(struct net *net)
{
struct synproxy_net *snet = synproxy_pernet(net);
nf_ct_put(snet->tmpl);
synproxy_proc_exit(net);
free_percpu(snet->stats);
}
static struct pernet_operations synproxy_net_ops = {
.init = synproxy_net_init,
.exit = synproxy_net_exit,
.id = &synproxy_net_id,
.size = sizeof(struct synproxy_net),
};
static int __init synproxy_core_init(void)
{
return register_pernet_subsys(&synproxy_net_ops);
}
static void __exit synproxy_core_exit(void)
{
unregister_pernet_subsys(&synproxy_net_ops);
}
module_init(synproxy_core_init);
module_exit(synproxy_core_exit);
static struct iphdr *
synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
__be32 daddr)
{
struct iphdr *iph;
skb_reset_network_header(skb);
iph = skb_put(skb, sizeof(*iph));
iph->version = 4;
iph->ihl = sizeof(*iph) / 4;
iph->tos = 0;
iph->id = 0;
iph->frag_off = htons(IP_DF);
iph->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
iph->protocol = IPPROTO_TCP;
iph->check = 0;
iph->saddr = saddr;
iph->daddr = daddr;
return iph;
}
static void
synproxy_send_tcp(struct net *net,
const struct sk_buff *skb, struct sk_buff *nskb,
struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
struct iphdr *niph, struct tcphdr *nth,
unsigned int tcp_hdr_size)
{
nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (unsigned char *)nth - nskb->head;
nskb->csum_offset = offsetof(struct tcphdr, check);
skb_dst_set_noref(nskb, skb_dst(skb));
nskb->protocol = htons(ETH_P_IP);
if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
goto free_nskb;
if (nfct) {
nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
nf_conntrack_get(nfct);
}
ip_local_out(net, nskb->sk, nskb);
return;
free_nskb:
kfree_skb(nskb);
}
void
synproxy_send_client_synack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
u16 mss = opts->mss_encode;
iph = ip_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss));
nth->ack_seq = htonl(ntohl(th->seq) + 1);
tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK;
if (opts->options & NF_SYNPROXY_OPT_ECN)
tcp_flag_word(nth) |= TCP_FLAG_ECE;
nth->doff = tcp_hdr_size / 4;
nth->window = 0;
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
}
EXPORT_SYMBOL_GPL(synproxy_send_client_synack);
static void
synproxy_send_server_syn(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts, u32 recv_seq)
{
struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ip_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(recv_seq - 1);
/* ack_seq is used to relay our ISN to the synproxy hook to initialize
* sequence number translation once a connection tracking entry exists.
*/
nth->ack_seq = htonl(ntohl(th->ack_seq) - 1);
tcp_flag_word(nth) = TCP_FLAG_SYN;
if (opts->options & NF_SYNPROXY_OPT_ECN)
tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR;
nth->doff = tcp_hdr_size / 4;
nth->window = th->window;
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
niph, nth, tcp_hdr_size);
}
static void
synproxy_send_server_ack(struct net *net,
const struct ip_ct_tcp *state,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ip_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(ntohl(th->ack_seq));
nth->ack_seq = htonl(ntohl(th->seq) + 1);
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin);
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
}
static void
synproxy_send_client_ack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ip_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(ntohl(th->seq) + 1);
nth->ack_seq = th->ack_seq;
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
nth->window = htons(ntohs(th->window) >> opts->wscale);
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
}
bool
synproxy_recv_client_ack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq)
{
struct synproxy_net *snet = synproxy_pernet(net);
int mss;
mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
if (mss == 0) {
this_cpu_inc(snet->stats->cookie_invalid);
return false;
}
this_cpu_inc(snet->stats->cookie_valid);
opts->mss_option = mss;
opts->options |= NF_SYNPROXY_OPT_MSS;
if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP)
synproxy_check_timestamp_cookie(opts);
synproxy_send_server_syn(net, skb, th, opts, recv_seq);
return true;
}
EXPORT_SYMBOL_GPL(synproxy_recv_client_ack);
unsigned int
ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *nhs)
{
struct net *net = nhs->net;
struct synproxy_net *snet = synproxy_pernet(net);
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
struct nf_conn_synproxy *synproxy;
struct synproxy_options opts = {};
const struct ip_ct_tcp *state;
struct tcphdr *th, _th;
unsigned int thoff;
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
return NF_ACCEPT;
synproxy = nfct_synproxy(ct);
if (!synproxy)
return NF_ACCEPT;
if (nf_is_loopback_packet(skb) ||
ip_hdr(skb)->protocol != IPPROTO_TCP)
return NF_ACCEPT;
thoff = ip_hdrlen(skb);
th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
if (!th)
return NF_DROP;
state = &ct->proto.tcp;
switch (state->state) {
case TCP_CONNTRACK_CLOSE:
if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
ntohl(th->seq) + 1);
break;
}
if (!th->syn || th->ack ||
CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
break;
/* Reopened connection - reset the sequence number and timestamp
 * adjustments; they will be reinitialized once the connection is
 * reestablished.
 */
nf_ct_seqadj_init(ct, ctinfo, 0);
synproxy->tsoff = 0;
this_cpu_inc(snet->stats->conn_reopened);
fallthrough;
case TCP_CONNTRACK_SYN_SENT:
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (!th->syn && th->ack &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
/* Keep-Alives are sent with SEG.SEQ = SND.NXT-1,
 * therefore we need to add 1 to make the SYN sequence
 * number match that of the first SYN.
 */
if (synproxy_recv_client_ack(net, skb, th, &opts,
ntohl(th->seq) + 1)) {
this_cpu_inc(snet->stats->cookie_retrans);
consume_skb(skb);
return NF_STOLEN;
} else {
return NF_DROP;
}
}
synproxy->isn = ntohl(th->ack_seq);
if (opts.options & NF_SYNPROXY_OPT_TIMESTAMP)
synproxy->its = opts.tsecr;
nf_conntrack_event_cache(IPCT_SYNPROXY, ct);
break;
case TCP_CONNTRACK_SYN_RECV:
if (!th->syn || !th->ack)
break;
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (opts.options & NF_SYNPROXY_OPT_TIMESTAMP) {
synproxy->tsoff = opts.tsval - synproxy->its;
nf_conntrack_event_cache(IPCT_SYNPROXY, ct);
}
opts.options &= ~(NF_SYNPROXY_OPT_MSS |
NF_SYNPROXY_OPT_WSCALE |
NF_SYNPROXY_OPT_SACK_PERM);
swap(opts.tsval, opts.tsecr);
synproxy_send_server_ack(net, state, skb, th, &opts);
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
nf_conntrack_event_cache(IPCT_SEQADJ, ct);
swap(opts.tsval, opts.tsecr);
synproxy_send_client_ack(net, skb, th, &opts);
consume_skb(skb);
return NF_STOLEN;
default:
break;
}
synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(ipv4_synproxy_hook);
static const struct nf_hook_ops ipv4_synproxy_ops[] = {
{
.hook = ipv4_synproxy_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
},
{
.hook = ipv4_synproxy_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
},
};
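/* Hook registration is reference counted so that multiple users in the same
 * netns share a single set of SYNPROXY hooks.
 */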
int nf_synproxy_ipv4_init(struct synproxy_net *snet, struct net *net)
{
int err;
if (snet->hook_ref4 == 0) {
err = nf_register_net_hooks(net, ipv4_synproxy_ops,
ARRAY_SIZE(ipv4_synproxy_ops));
if (err)
return err;
}
snet->hook_ref4++;
return 0;
}
EXPORT_SYMBOL_GPL(nf_synproxy_ipv4_init);
void nf_synproxy_ipv4_fini(struct synproxy_net *snet, struct net *net)
{
snet->hook_ref4--;
if (snet->hook_ref4 == 0)
nf_unregister_net_hooks(net, ipv4_synproxy_ops,
ARRAY_SIZE(ipv4_synproxy_ops));
}
EXPORT_SYMBOL_GPL(nf_synproxy_ipv4_fini);
#if IS_ENABLED(CONFIG_IPV6)
static struct ipv6hdr *
synproxy_build_ip_ipv6(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr)
{
struct ipv6hdr *iph;
skb_reset_network_header(skb);
iph = skb_put(skb, sizeof(*iph));
ip6_flow_hdr(iph, 0, 0);
iph->hop_limit = net->ipv6.devconf_all->hop_limit;
iph->nexthdr = IPPROTO_TCP;
iph->saddr = *saddr;
iph->daddr = *daddr;
return iph;
}
static void
synproxy_send_tcp_ipv6(struct net *net,
const struct sk_buff *skb, struct sk_buff *nskb,
struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
struct ipv6hdr *niph, struct tcphdr *nth,
unsigned int tcp_hdr_size)
{
struct dst_entry *dst;
struct flowi6 fl6;
int err;
nth->check = ~tcp_v6_check(tcp_hdr_size, &niph->saddr, &niph->daddr, 0);
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (unsigned char *)nth - nskb->head;
nskb->csum_offset = offsetof(struct tcphdr, check);
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
fl6.saddr = niph->saddr;
fl6.daddr = niph->daddr;
fl6.fl6_sport = nth->source;
fl6.fl6_dport = nth->dest;
security_skb_classify_flow((struct sk_buff *)skb,
flowi6_to_flowi_common(&fl6));
err = nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
if (err) {
goto free_nskb;
}
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
if (IS_ERR(dst))
goto free_nskb;
skb_dst_set(nskb, dst);
if (nfct) {
nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
nf_conntrack_get(nfct);
}
ip6_local_out(net, nskb->sk, nskb);
return;
free_nskb:
kfree_skb(nskb);
}
void
synproxy_send_client_synack_ipv6(struct net *net,
const struct sk_buff *skb,
const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
u16 mss = opts->mss_encode;
iph = ipv6_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip_ipv6(net, nskb, &iph->daddr, &iph->saddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(nf_ipv6_cookie_init_sequence(iph, th, &mss));
nth->ack_seq = htonl(ntohl(th->seq) + 1);
tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK;
if (opts->options & NF_SYNPROXY_OPT_ECN)
tcp_flag_word(nth) |= TCP_FLAG_ECE;
nth->doff = tcp_hdr_size / 4;
nth->window = 0;
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp_ipv6(net, skb, nskb, skb_nfct(skb),
IP_CT_ESTABLISHED_REPLY, niph, nth,
tcp_hdr_size);
}
EXPORT_SYMBOL_GPL(synproxy_send_client_synack_ipv6);
static void
synproxy_send_server_syn_ipv6(struct net *net, const struct sk_buff *skb,
const struct tcphdr *th,
const struct synproxy_options *opts, u32 recv_seq)
{
struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *nskb;
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ipv6_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip_ipv6(net, nskb, &iph->saddr, &iph->daddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(recv_seq - 1);
/* ack_seq is used to relay our ISN to the synproxy hook to initialize
* sequence number translation once a connection tracking entry exists.
*/
nth->ack_seq = htonl(ntohl(th->ack_seq) - 1);
tcp_flag_word(nth) = TCP_FLAG_SYN;
if (opts->options & NF_SYNPROXY_OPT_ECN)
tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR;
nth->doff = tcp_hdr_size / 4;
nth->window = th->window;
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp_ipv6(net, skb, nskb, &snet->tmpl->ct_general,
IP_CT_NEW, niph, nth, tcp_hdr_size);
}
static void
synproxy_send_server_ack_ipv6(struct net *net, const struct ip_ct_tcp *state,
const struct sk_buff *skb,
const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ipv6_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip_ipv6(net, nskb, &iph->daddr, &iph->saddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(ntohl(th->ack_seq));
nth->ack_seq = htonl(ntohl(th->seq) + 1);
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin);
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp_ipv6(net, skb, nskb, NULL, 0, niph, nth,
tcp_hdr_size);
}
static void
synproxy_send_client_ack_ipv6(struct net *net, const struct sk_buff *skb,
const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ipv6_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip_ipv6(net, nskb, &iph->saddr, &iph->daddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(ntohl(th->seq) + 1);
nth->ack_seq = th->ack_seq;
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
nth->window = htons(ntohs(th->window) >> opts->wscale);
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp_ipv6(net, skb, nskb, skb_nfct(skb),
IP_CT_ESTABLISHED_REPLY, niph, nth,
tcp_hdr_size);
}
bool
synproxy_recv_client_ack_ipv6(struct net *net,
const struct sk_buff *skb,
const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq)
{
struct synproxy_net *snet = synproxy_pernet(net);
int mss;
mss = nf_cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1);
if (mss == 0) {
this_cpu_inc(snet->stats->cookie_invalid);
return false;
}
this_cpu_inc(snet->stats->cookie_valid);
opts->mss_option = mss;
opts->options |= NF_SYNPROXY_OPT_MSS;
if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP)
synproxy_check_timestamp_cookie(opts);
synproxy_send_server_syn_ipv6(net, skb, th, opts, recv_seq);
return true;
}
EXPORT_SYMBOL_GPL(synproxy_recv_client_ack_ipv6);
unsigned int
ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *nhs)
{
struct net *net = nhs->net;
struct synproxy_net *snet = synproxy_pernet(net);
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
struct nf_conn_synproxy *synproxy;
struct synproxy_options opts = {};
const struct ip_ct_tcp *state;
struct tcphdr *th, _th;
__be16 frag_off;
u8 nexthdr;
int thoff;
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
return NF_ACCEPT;
synproxy = nfct_synproxy(ct);
if (!synproxy)
return NF_ACCEPT;
if (nf_is_loopback_packet(skb))
return NF_ACCEPT;
nexthdr = ipv6_hdr(skb)->nexthdr;
thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
&frag_off);
if (thoff < 0 || nexthdr != IPPROTO_TCP)
return NF_ACCEPT;
th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
if (!th)
return NF_DROP;
state = &ct->proto.tcp;
switch (state->state) {
case TCP_CONNTRACK_CLOSE:
if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
ntohl(th->seq) + 1);
break;
}
if (!th->syn || th->ack ||
CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
break;
/* Reopened connection - reset the sequence number and timestamp
 * adjustments; they will be reinitialized once the connection is
 * reestablished.
 */
nf_ct_seqadj_init(ct, ctinfo, 0);
synproxy->tsoff = 0;
this_cpu_inc(snet->stats->conn_reopened);
fallthrough;
case TCP_CONNTRACK_SYN_SENT:
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (!th->syn && th->ack &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
/* Keep-Alives are sent with SEG.SEQ = SND.NXT-1,
 * therefore we need to add 1 to make the SYN sequence
 * number match that of the first SYN.
 */
if (synproxy_recv_client_ack_ipv6(net, skb, th, &opts,
ntohl(th->seq) + 1)) {
this_cpu_inc(snet->stats->cookie_retrans);
consume_skb(skb);
return NF_STOLEN;
} else {
return NF_DROP;
}
}
synproxy->isn = ntohl(th->ack_seq);
if (opts.options & NF_SYNPROXY_OPT_TIMESTAMP)
synproxy->its = opts.tsecr;
nf_conntrack_event_cache(IPCT_SYNPROXY, ct);
break;
case TCP_CONNTRACK_SYN_RECV:
if (!th->syn || !th->ack)
break;
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (opts.options & NF_SYNPROXY_OPT_TIMESTAMP) {
synproxy->tsoff = opts.tsval - synproxy->its;
nf_conntrack_event_cache(IPCT_SYNPROXY, ct);
}
opts.options &= ~(NF_SYNPROXY_OPT_MSS |
NF_SYNPROXY_OPT_WSCALE |
NF_SYNPROXY_OPT_SACK_PERM);
swap(opts.tsval, opts.tsecr);
synproxy_send_server_ack_ipv6(net, state, skb, th, &opts);
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
nf_conntrack_event_cache(IPCT_SEQADJ, ct);
swap(opts.tsval, opts.tsecr);
synproxy_send_client_ack_ipv6(net, skb, th, &opts);
consume_skb(skb);
return NF_STOLEN;
default:
break;
}
synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(ipv6_synproxy_hook);
static const struct nf_hook_ops ipv6_synproxy_ops[] = {
{
.hook = ipv6_synproxy_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
},
{
.hook = ipv6_synproxy_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
},
};
int
nf_synproxy_ipv6_init(struct synproxy_net *snet, struct net *net)
{
int err;
if (snet->hook_ref6 == 0) {
err = nf_register_net_hooks(net, ipv6_synproxy_ops,
ARRAY_SIZE(ipv6_synproxy_ops));
if (err)
return err;
}
snet->hook_ref6++;
return 0;
}
EXPORT_SYMBOL_GPL(nf_synproxy_ipv6_init);
void
nf_synproxy_ipv6_fini(struct synproxy_net *snet, struct net *net)
{
snet->hook_ref6--;
if (snet->hook_ref6 == 0)
nf_unregister_net_hooks(net, ipv6_synproxy_ops,
ARRAY_SIZE(ipv6_synproxy_ops));
}
EXPORT_SYMBOL_GPL(nf_synproxy_ipv6_fini);
#endif /* CONFIG_IPV6 */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("nftables SYNPROXY expression support");
| linux-master | net/netfilter/nf_synproxy_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Arturo Borrero Gonzalez <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_masquerade.h>
struct nft_masq {
u32 flags;
u8 sreg_proto_min;
u8 sreg_proto_max;
};
static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
[NFTA_MASQ_FLAGS] =
NLA_POLICY_MASK(NLA_BE32, NF_NAT_RANGE_MASK),
[NFTA_MASQ_REG_PROTO_MIN] = { .type = NLA_U32 },
[NFTA_MASQ_REG_PROTO_MAX] = { .type = NLA_U32 },
};
static int nft_masq_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
int err;
err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
if (err < 0)
return err;
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_POST_ROUTING));
}
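/* Parse the optional flags and the source registers holding the proto
 * (port) range; if only a minimum register is given, the range collapses
 * to a single value.
 */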
static int nft_masq_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
u32 plen = sizeof_field(struct nf_nat_range, min_proto.all);
struct nft_masq *priv = nft_expr_priv(expr);
int err;
if (tb[NFTA_MASQ_FLAGS])
priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS]));
if (tb[NFTA_MASQ_REG_PROTO_MIN]) {
err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MIN],
&priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_MASQ_REG_PROTO_MAX]) {
err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MAX],
&priv->sreg_proto_max,
plen);
if (err < 0)
return err;
} else {
priv->sreg_proto_max = priv->sreg_proto_min;
}
}
return nf_ct_netns_get(ctx->net, ctx->family);
}
static int nft_masq_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_masq *priv = nft_expr_priv(expr);
if (priv->flags != 0 &&
nla_put_be32(skb, NFTA_MASQ_FLAGS, htonl(priv->flags)))
goto nla_put_failure;
if (priv->sreg_proto_min) {
if (nft_dump_register(skb, NFTA_MASQ_REG_PROTO_MIN,
priv->sreg_proto_min) ||
nft_dump_register(skb, NFTA_MASQ_REG_PROTO_MAX,
priv->sreg_proto_max))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -1;
}
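/* Per-packet evaluation: build an nf_nat_range2 from the expression state
 * and let the family-specific masquerade backend set the verdict.
 */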
static void nft_masq_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_masq *priv = nft_expr_priv(expr);
struct nf_nat_range2 range;
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
if (priv->sreg_proto_min) {
range.min_proto.all = (__force __be16)
nft_reg_load16(®s->data[priv->sreg_proto_min]);
range.max_proto.all = (__force __be16)
nft_reg_load16(®s->data[priv->sreg_proto_max]);
}
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb,
nft_hook(pkt),
&range,
nft_out(pkt));
break;
#ifdef CONFIG_NF_TABLES_IPV6
case NFPROTO_IPV6:
regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
nft_out(pkt));
break;
#endif
default:
WARN_ON_ONCE(1);
break;
}
}
static void
nft_masq_ipv4_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
nf_ct_netns_put(ctx->net, NFPROTO_IPV4);
}
static struct nft_expr_type nft_masq_ipv4_type;
static const struct nft_expr_ops nft_masq_ipv4_ops = {
.type = &nft_masq_ipv4_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
.eval = nft_masq_eval,
.init = nft_masq_init,
.destroy = nft_masq_ipv4_destroy,
.dump = nft_masq_dump,
.validate = nft_masq_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_masq_ipv4_type __read_mostly = {
.family = NFPROTO_IPV4,
.name = "masq",
.ops = &nft_masq_ipv4_ops,
.policy = nft_masq_policy,
.maxattr = NFTA_MASQ_MAX,
.owner = THIS_MODULE,
};
#ifdef CONFIG_NF_TABLES_IPV6
static void
nft_masq_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
nf_ct_netns_put(ctx->net, NFPROTO_IPV6);
}
static struct nft_expr_type nft_masq_ipv6_type;
static const struct nft_expr_ops nft_masq_ipv6_ops = {
.type = &nft_masq_ipv6_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
.eval = nft_masq_eval,
.init = nft_masq_init,
.destroy = nft_masq_ipv6_destroy,
.dump = nft_masq_dump,
.validate = nft_masq_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_masq_ipv6_type __read_mostly = {
.family = NFPROTO_IPV6,
.name = "masq",
.ops = &nft_masq_ipv6_ops,
.policy = nft_masq_policy,
.maxattr = NFTA_MASQ_MAX,
.owner = THIS_MODULE,
};
static int __init nft_masq_module_init_ipv6(void)
{
return nft_register_expr(&nft_masq_ipv6_type);
}
static void nft_masq_module_exit_ipv6(void)
{
nft_unregister_expr(&nft_masq_ipv6_type);
}
#else
static inline int nft_masq_module_init_ipv6(void) { return 0; }
static inline void nft_masq_module_exit_ipv6(void) {}
#endif
#ifdef CONFIG_NF_TABLES_INET
static void
nft_masq_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
nf_ct_netns_put(ctx->net, NFPROTO_INET);
}
static struct nft_expr_type nft_masq_inet_type;
static const struct nft_expr_ops nft_masq_inet_ops = {
.type = &nft_masq_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
.eval = nft_masq_eval,
.init = nft_masq_init,
.destroy = nft_masq_inet_destroy,
.dump = nft_masq_dump,
.validate = nft_masq_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_masq_inet_type __read_mostly = {
.family = NFPROTO_INET,
.name = "masq",
.ops = &nft_masq_inet_ops,
.policy = nft_masq_policy,
.maxattr = NFTA_MASQ_MAX,
.owner = THIS_MODULE,
};
static int __init nft_masq_module_init_inet(void)
{
return nft_register_expr(&nft_masq_inet_type);
}
static void nft_masq_module_exit_inet(void)
{
nft_unregister_expr(&nft_masq_inet_type);
}
#else
static inline int nft_masq_module_init_inet(void) { return 0; }
static inline void nft_masq_module_exit_inet(void) {}
#endif
static int __init nft_masq_module_init(void)
{
int ret;
ret = nft_masq_module_init_ipv6();
if (ret < 0)
return ret;
ret = nft_masq_module_init_inet();
if (ret < 0) {
nft_masq_module_exit_ipv6();
return ret;
}
ret = nft_register_expr(&nft_masq_ipv4_type);
if (ret < 0) {
nft_masq_module_exit_inet();
nft_masq_module_exit_ipv6();
return ret;
}
ret = nf_nat_masquerade_inet_register_notifiers();
if (ret < 0) {
nft_masq_module_exit_ipv6();
nft_masq_module_exit_inet();
nft_unregister_expr(&nft_masq_ipv4_type);
return ret;
}
return ret;
}
static void __exit nft_masq_module_exit(void)
{
nft_masq_module_exit_ipv6();
nft_masq_module_exit_inet();
nft_unregister_expr(&nft_masq_ipv4_type);
nf_nat_masquerade_inet_unregister_notifiers();
}
module_init(nft_masq_module_init);
module_exit(nft_masq_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <[email protected]>");
MODULE_ALIAS_NFT_EXPR("masq");
MODULE_DESCRIPTION("Netfilter nftables masquerade expression support");
| linux-master | net/netfilter/nft_masq.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match L2TP header parameters. */
/* (C) 2013 James Chapman <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/l2tp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter/xt_l2tp.h>
/* L2TP header masks */
#define L2TP_HDR_T_BIT 0x8000
#define L2TP_HDR_L_BIT 0x4000
#define L2TP_HDR_VER 0x000f
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <[email protected]>");
MODULE_DESCRIPTION("Xtables: L2TP header match");
MODULE_ALIAS("ipt_l2tp");
MODULE_ALIAS("ip6t_l2tp");
/* The L2TP fields that can be matched */
struct l2tp_data {
u32 tid;
u32 sid;
u8 type;
u8 version;
};
union l2tp_val {
__be16 val16[2];
__be32 val32;
};
static bool l2tp_match(const struct xt_l2tp_info *info, struct l2tp_data *data)
{
if ((info->flags & XT_L2TP_TYPE) && (info->type != data->type))
return false;
if ((info->flags & XT_L2TP_VERSION) && (info->version != data->version))
return false;
/* Check tid only for L2TPv3 control or any L2TPv2 packets */
if ((info->flags & XT_L2TP_TID) &&
((data->type == XT_L2TP_TYPE_CONTROL) || (data->version == 2)) &&
(info->tid != data->tid))
return false;
/* Check sid only for L2TP data packets */
if ((info->flags & XT_L2TP_SID) && (data->type == XT_L2TP_TYPE_DATA) &&
(info->sid != data->sid))
return false;
return true;
}
/* Parse L2TP header fields when UDP encapsulation is used. Handles
* L2TPv2 and L2TPv3. Note the L2TPv3 control and data packets have a
* different format. See
* RFC2661, Section 3.1, L2TPv2 Header Format
* RFC3931, Section 3.2.1, L2TPv3 Control Message Header
* RFC3931, Section 3.2.2, L2TPv3 Data Message Header
* RFC3931, Section 4.1.2.1, L2TPv3 Session Header over UDP
*/
static bool l2tp_udp_mt(const struct sk_buff *skb, struct xt_action_param *par, u16 thoff)
{
const struct xt_l2tp_info *info = par->matchinfo;
int uhlen = sizeof(struct udphdr);
int offs = thoff + uhlen;
union l2tp_val *lh;
union l2tp_val lhbuf;
u16 flags;
struct l2tp_data data = { 0, };
if (par->fragoff != 0)
return false;
/* Extract L2TP header fields. The flags in the first 16 bits
* tell us where the other fields are.
*/
lh = skb_header_pointer(skb, offs, 2, &lhbuf);
if (lh == NULL)
return false;
flags = ntohs(lh->val16[0]);
if (flags & L2TP_HDR_T_BIT)
data.type = XT_L2TP_TYPE_CONTROL;
else
data.type = XT_L2TP_TYPE_DATA;
data.version = (u8) flags & L2TP_HDR_VER;
/* Now extract the L2TP tid/sid. These are in different places
* for L2TPv2 (rfc2661) and L2TPv3 (rfc3931). For L2TPv2, we
* must also check to see if the length field is present,
* since this affects the offsets into the packet of the
* tid/sid fields.
*/
if (data.version == 3) {
lh = skb_header_pointer(skb, offs + 4, 4, &lhbuf);
if (lh == NULL)
return false;
if (data.type == XT_L2TP_TYPE_CONTROL)
data.tid = ntohl(lh->val32);
else
data.sid = ntohl(lh->val32);
} else if (data.version == 2) {
if (flags & L2TP_HDR_L_BIT)
offs += 2;
lh = skb_header_pointer(skb, offs + 2, 4, &lhbuf);
if (lh == NULL)
return false;
data.tid = (u32) ntohs(lh->val16[0]);
data.sid = (u32) ntohs(lh->val16[1]);
} else
return false;
return l2tp_match(info, &data);
}
/* Parse L2TP header fields for IP encapsulation (no UDP header).
* L2TPv3 data packets have a different form with IP encap. See
* RFC3931, Section 4.1.1.1, L2TPv3 Session Header over IP.
* RFC3931, Section 4.1.1.2, L2TPv3 Control and Data Traffic over IP.
*/
static bool l2tp_ip_mt(const struct sk_buff *skb, struct xt_action_param *par, u16 thoff)
{
const struct xt_l2tp_info *info = par->matchinfo;
union l2tp_val *lh;
union l2tp_val lhbuf;
struct l2tp_data data = { 0, };
/* For IP encap, the L2TP sid is the first 32 bits. */
lh = skb_header_pointer(skb, thoff, sizeof(lhbuf), &lhbuf);
if (lh == NULL)
return false;
if (lh->val32 == 0) {
/* Must be a control packet. The L2TP tid is further
* into the packet.
*/
data.type = XT_L2TP_TYPE_CONTROL;
lh = skb_header_pointer(skb, thoff + 8, sizeof(lhbuf),
&lhbuf);
if (lh == NULL)
return false;
data.tid = ntohl(lh->val32);
} else {
data.sid = ntohl(lh->val32);
data.type = XT_L2TP_TYPE_DATA;
}
data.version = 3;
return l2tp_match(info, &data);
}
static bool l2tp_mt4(const struct sk_buff *skb, struct xt_action_param *par)
{
struct iphdr *iph = ip_hdr(skb);
u8 ipproto = iph->protocol;
/* l2tp_mt_check4 already restricts the transport protocol */
switch (ipproto) {
case IPPROTO_UDP:
return l2tp_udp_mt(skb, par, par->thoff);
case IPPROTO_L2TP:
return l2tp_ip_mt(skb, par, par->thoff);
}
return false;
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static bool l2tp_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
unsigned int thoff = 0;
unsigned short fragoff = 0;
int ipproto;
ipproto = ipv6_find_hdr(skb, &thoff, -1, &fragoff, NULL);
if (fragoff != 0)
return false;
/* l2tp_mt_check6 already restricts the transport protocol */
switch (ipproto) {
case IPPROTO_UDP:
return l2tp_udp_mt(skb, par, thoff);
case IPPROTO_L2TP:
return l2tp_ip_mt(skb, par, thoff);
}
return false;
}
#endif
static int l2tp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_l2tp_info *info = par->matchinfo;
/* Check for invalid flags */
if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION |
XT_L2TP_TYPE)) {
pr_info_ratelimited("unknown flags: %x\n", info->flags);
return -EINVAL;
}
/* At least one of tid, sid or type=control must be specified */
if ((!(info->flags & XT_L2TP_TID)) &&
(!(info->flags & XT_L2TP_SID)) &&
((!(info->flags & XT_L2TP_TYPE)) ||
(info->type != XT_L2TP_TYPE_CONTROL))) {
pr_info_ratelimited("invalid flags combination: %x\n",
info->flags);
return -EINVAL;
}
/* If version 2 is specified, check that incompatible params
* are not supplied
*/
if (info->flags & XT_L2TP_VERSION) {
if ((info->version < 2) || (info->version > 3)) {
pr_info_ratelimited("wrong L2TP version: %u\n",
info->version);
return -EINVAL;
}
if (info->version == 2) {
if ((info->flags & XT_L2TP_TID) &&
(info->tid > 0xffff)) {
pr_info_ratelimited("v2 tid > 0xffff: %u\n",
info->tid);
return -EINVAL;
}
if ((info->flags & XT_L2TP_SID) &&
(info->sid > 0xffff)) {
pr_info_ratelimited("v2 sid > 0xffff: %u\n",
info->sid);
return -EINVAL;
}
}
}
return 0;
}
static int l2tp_mt_check4(const struct xt_mtchk_param *par)
{
const struct xt_l2tp_info *info = par->matchinfo;
const struct ipt_entry *e = par->entryinfo;
const struct ipt_ip *ip = &e->ip;
int ret;
ret = l2tp_mt_check(par);
if (ret != 0)
return ret;
if ((ip->proto != IPPROTO_UDP) &&
(ip->proto != IPPROTO_L2TP)) {
pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
return -EINVAL;
}
if ((ip->proto == IPPROTO_L2TP) &&
(info->version == 2)) {
pr_info_ratelimited("v2 doesn't support IP mode\n");
return -EINVAL;
}
return 0;
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int l2tp_mt_check6(const struct xt_mtchk_param *par)
{
const struct xt_l2tp_info *info = par->matchinfo;
const struct ip6t_entry *e = par->entryinfo;
const struct ip6t_ip6 *ip = &e->ipv6;
int ret;
ret = l2tp_mt_check(par);
if (ret != 0)
return ret;
if ((ip->proto != IPPROTO_UDP) &&
(ip->proto != IPPROTO_L2TP)) {
pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
return -EINVAL;
}
if ((ip->proto == IPPROTO_L2TP) &&
(info->version == 2)) {
pr_info_ratelimited("v2 doesn't support IP mode\n");
return -EINVAL;
}
return 0;
}
#endif
static struct xt_match l2tp_mt_reg[] __read_mostly = {
{
.name = "l2tp",
.revision = 0,
.family = NFPROTO_IPV4,
.match = l2tp_mt4,
.matchsize = XT_ALIGN(sizeof(struct xt_l2tp_info)),
.checkentry = l2tp_mt_check4,
.hooks = ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD)),
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "l2tp",
.revision = 0,
.family = NFPROTO_IPV6,
.match = l2tp_mt6,
.matchsize = XT_ALIGN(sizeof(struct xt_l2tp_info)),
.checkentry = l2tp_mt_check6,
.hooks = ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD)),
.me = THIS_MODULE,
},
#endif
};
static int __init l2tp_mt_init(void)
{
return xt_register_matches(&l2tp_mt_reg[0], ARRAY_SIZE(l2tp_mt_reg));
}
static void __exit l2tp_mt_exit(void)
{
xt_unregister_matches(&l2tp_mt_reg[0], ARRAY_SIZE(l2tp_mt_reg));
}
module_init(l2tp_mt_init);
module_exit(l2tp_mt_exit);
| linux-master | net/netfilter/xt_l2tp.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/sysctl.h>
#include <net/lwtunnel.h>
#include <net/netfilter/nf_hooks_lwtunnel.h>
static inline int nf_hooks_lwtunnel_get(void)
{
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return 1;
else
return 0;
}
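/* Enabling is one-way: once the static branch has been turned on, attempts
 * to clear it return -EBUSY instead of flipping the key back off.
 */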
static inline int nf_hooks_lwtunnel_set(int enable)
{
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled)) {
if (!enable)
return -EBUSY;
} else if (enable) {
static_branch_enable(&nf_hooks_lwtunnel_enabled);
}
return 0;
}
#ifdef CONFIG_SYSCTL
int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int proc_nf_hooks_lwtunnel_enabled = 0;
struct ctl_table tmp = {
.procname = table->procname,
.data = &proc_nf_hooks_lwtunnel_enabled,
.maxlen = sizeof(int),
.mode = table->mode,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
};
int ret;
if (!write)
proc_nf_hooks_lwtunnel_enabled = nf_hooks_lwtunnel_get();
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0)
ret = nf_hooks_lwtunnel_set(proc_nf_hooks_lwtunnel_enabled);
return ret;
}
EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_sysctl_handler);
#endif /* CONFIG_SYSCTL */
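/* Example runtime toggle (illustrative; requires CONFIG_SYSCTL):
 *   sysctl -w net.netfilter.nf_hooks_lwtunnel=1
 */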
| linux-master | net/netfilter/nf_hooks_lwtunnel.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <net/netfilter/nf_flow_table.h>
static void *nf_flow_table_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ft.stat, cpu);
}
return NULL;
}
static void *nf_flow_table_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ft.stat, cpu);
}
(*pos)++;
return NULL;
}
static void nf_flow_table_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int nf_flow_table_cpu_seq_show(struct seq_file *seq, void *v)
{
const struct nf_flow_table_stat *st = v;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "wq_add wq_del wq_stats\n");
return 0;
}
seq_printf(seq, "%8d %8d %8d\n",
st->count_wq_add,
st->count_wq_del,
st->count_wq_stats
);
return 0;
}
static const struct seq_operations nf_flow_table_cpu_seq_ops = {
.start = nf_flow_table_cpu_seq_start,
.next = nf_flow_table_cpu_seq_next,
.stop = nf_flow_table_cpu_seq_stop,
.show = nf_flow_table_cpu_seq_show,
};
int nf_flow_table_init_proc(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_create_net("nf_flowtable", 0444, net->proc_net_stat,
&nf_flow_table_cpu_seq_ops,
sizeof(struct seq_net_private));
return pde ? 0 : -ENOMEM;
}
void nf_flow_table_fini_proc(struct net *net)
{
remove_proc_entry("nf_flowtable", net->proc_net_stat);
}
| linux-master | net/netfilter/nf_flow_table_procfs.c |
/*
* netfilter module to limit the number of parallel tcp
* connections per IP address.
* (c) 2000 Gerd Knorr <[email protected]>
* Nov 2002: Martin Bene <[email protected]>:
* only ignore TIME_WAIT or gone connections
* (C) CC Computer Consultants GmbH, 2007
*
* based on ...
*
* Kernel module to match connection tracking information.
* GPL (C) 1999 Rusty Russell ([email protected]).
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connlimit.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_count.h>
static bool
connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = xt_net(par);
const struct xt_connlimit_info *info = par->matchinfo;
struct nf_conntrack_tuple tuple;
const struct nf_conntrack_tuple *tuple_ptr = &tuple;
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int connections;
u32 key[5];
ct = nf_ct_get(skb, &ctinfo);
if (ct != NULL) {
tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
zone = nf_ct_zone(ct);
} else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
xt_family(par), net, &tuple)) {
goto hotdrop;
}
if (xt_family(par) == NFPROTO_IPV6) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
union nf_inet_addr addr;
unsigned int i;
memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
&iph->daddr : &iph->saddr, sizeof(addr.ip6));
for (i = 0; i < ARRAY_SIZE(addr.ip6); ++i)
addr.ip6[i] &= info->mask.ip6[i];
memcpy(key, &addr, sizeof(addr.ip6));
key[4] = zone->id;
} else {
const struct iphdr *iph = ip_hdr(skb);
key[0] = (info->flags & XT_CONNLIMIT_DADDR) ?
(__force __u32)iph->daddr : (__force __u32)iph->saddr;
key[0] &= (__force __u32)info->mask.ip;
key[1] = zone->id;
}
connections = nf_conncount_count(net, info->data, key, tuple_ptr,
zone);
if (connections == 0)
/* kmalloc failed, drop it entirely */
goto hotdrop;
return (connections > info->limit) ^ !!(info->flags & XT_CONNLIMIT_INVERT);
hotdrop:
par->hotdrop = true;
return false;
}
static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_connlimit_info *info = par->matchinfo;
unsigned int keylen;
keylen = sizeof(u32);
if (par->family == NFPROTO_IPV6)
keylen += sizeof(struct in6_addr);
else
keylen += sizeof(struct in_addr);
/* init private data */
info->data = nf_conncount_init(par->net, par->family, keylen);
return PTR_ERR_OR_ZERO(info->data);
}
static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_connlimit_info *info = par->matchinfo;
nf_conncount_destroy(par->net, par->family, info->data);
}
static struct xt_match connlimit_mt_reg __read_mostly = {
.name = "connlimit",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = connlimit_mt_check,
.match = connlimit_mt,
.matchsize = sizeof(struct xt_connlimit_info),
.usersize = offsetof(struct xt_connlimit_info, data),
.destroy = connlimit_mt_destroy,
.me = THIS_MODULE,
};
static int __init connlimit_mt_init(void)
{
return xt_register_match(&connlimit_mt_reg);
}
static void __exit connlimit_mt_exit(void)
{
xt_unregister_match(&connlimit_mt_reg);
}
module_init(connlimit_mt_init);
module_exit(connlimit_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: Number of connections matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_connlimit");
MODULE_ALIAS("ip6t_connlimit");
| linux-master | net/netfilter/xt_connlimit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2007 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/gen_stats.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_rateest.h>
#include <net/netfilter/xt_rateest.h>
static bool
xt_rateest_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rateest_match_info *info = par->matchinfo;
struct gnet_stats_rate_est64 sample = {0};
u_int32_t bps1, bps2, pps1, pps2;
bool ret = true;
gen_estimator_read(&info->est1->rate_est, &sample);
if (info->flags & XT_RATEEST_MATCH_DELTA) {
bps1 = info->bps1 >= sample.bps ? info->bps1 - sample.bps : 0;
pps1 = info->pps1 >= sample.pps ? info->pps1 - sample.pps : 0;
} else {
bps1 = sample.bps;
pps1 = sample.pps;
}
if (info->flags & XT_RATEEST_MATCH_ABS) {
bps2 = info->bps2;
pps2 = info->pps2;
} else {
gen_estimator_read(&info->est2->rate_est, &sample);
if (info->flags & XT_RATEEST_MATCH_DELTA) {
bps2 = info->bps2 >= sample.bps ? info->bps2 - sample.bps : 0;
pps2 = info->pps2 >= sample.pps ? info->pps2 - sample.pps : 0;
} else {
bps2 = sample.bps;
pps2 = sample.pps;
}
}
switch (info->mode) {
case XT_RATEEST_MATCH_LT:
if (info->flags & XT_RATEEST_MATCH_BPS)
ret &= bps1 < bps2;
if (info->flags & XT_RATEEST_MATCH_PPS)
ret &= pps1 < pps2;
break;
case XT_RATEEST_MATCH_GT:
if (info->flags & XT_RATEEST_MATCH_BPS)
ret &= bps1 > bps2;
if (info->flags & XT_RATEEST_MATCH_PPS)
ret &= pps1 > pps2;
break;
case XT_RATEEST_MATCH_EQ:
if (info->flags & XT_RATEEST_MATCH_BPS)
ret &= bps1 == bps2;
if (info->flags & XT_RATEEST_MATCH_PPS)
ret &= pps1 == pps2;
break;
}
ret ^= info->flags & XT_RATEEST_MATCH_INVERT ? true : false;
return ret;
}
static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_rateest_match_info *info = par->matchinfo;
struct xt_rateest *est1, *est2;
int ret = -EINVAL;
if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
XT_RATEEST_MATCH_REL)) != 1)
goto err1;
if (!(info->flags & (XT_RATEEST_MATCH_BPS | XT_RATEEST_MATCH_PPS)))
goto err1;
switch (info->mode) {
case XT_RATEEST_MATCH_EQ:
case XT_RATEEST_MATCH_LT:
case XT_RATEEST_MATCH_GT:
break;
default:
goto err1;
}
ret = -ENOENT;
est1 = xt_rateest_lookup(par->net, info->name1);
if (!est1)
goto err1;
est2 = NULL;
if (info->flags & XT_RATEEST_MATCH_REL) {
est2 = xt_rateest_lookup(par->net, info->name2);
if (!est2)
goto err2;
}
info->est1 = est1;
info->est2 = est2;
return 0;
err2:
xt_rateest_put(par->net, est1);
err1:
return ret;
}
static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
{
struct xt_rateest_match_info *info = par->matchinfo;
xt_rateest_put(par->net, info->est1);
if (info->est2)
xt_rateest_put(par->net, info->est2);
}
static struct xt_match xt_rateest_mt_reg __read_mostly = {
.name = "rateest",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = xt_rateest_mt,
.checkentry = xt_rateest_mt_checkentry,
.destroy = xt_rateest_mt_destroy,
.matchsize = sizeof(struct xt_rateest_match_info),
.usersize = offsetof(struct xt_rateest_match_info, est1),
.me = THIS_MODULE,
};
static int __init xt_rateest_mt_init(void)
{
return xt_register_match(&xt_rateest_mt_reg);
}
static void __exit xt_rateest_mt_fini(void)
{
xt_unregister_match(&xt_rateest_mt_reg);
}
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xtables rate estimator match");
MODULE_ALIAS("ipt_rateest");
MODULE_ALIAS("ip6t_rateest");
module_init(xt_rateest_mt_init);
module_exit(xt_rateest_mt_fini);
| linux-master | net/netfilter/xt_rateest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/netfilter/xt_devgroup.h>
#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: Device group match");
MODULE_ALIAS("ipt_devgroup");
MODULE_ALIAS("ip6t_devgroup");
static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_devgroup_info *info = par->matchinfo;
if (info->flags & XT_DEVGROUP_MATCH_SRC &&
(((info->src_group ^ xt_in(par)->group) & info->src_mask ? 1 : 0) ^
((info->flags & XT_DEVGROUP_INVERT_SRC) ? 1 : 0)))
return false;
if (info->flags & XT_DEVGROUP_MATCH_DST &&
(((info->dst_group ^ xt_out(par)->group) & info->dst_mask ? 1 : 0) ^
((info->flags & XT_DEVGROUP_INVERT_DST) ? 1 : 0)))
return false;
return true;
}
static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
{
const struct xt_devgroup_info *info = par->matchinfo;
if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
return -EINVAL;
if (info->flags & XT_DEVGROUP_MATCH_SRC &&
par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD)))
return -EINVAL;
if (info->flags & XT_DEVGROUP_MATCH_DST &&
par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING)))
return -EINVAL;
return 0;
}
static struct xt_match devgroup_mt_reg __read_mostly = {
.name = "devgroup",
.match = devgroup_mt,
.checkentry = devgroup_mt_checkentry,
.matchsize = sizeof(struct xt_devgroup_info),
.family = NFPROTO_UNSPEC,
.me = THIS_MODULE
};
static int __init devgroup_mt_init(void)
{
return xt_register_match(&devgroup_mt_reg);
}
static void __exit devgroup_mt_exit(void)
{
xt_unregister_match(&devgroup_mt_reg);
}
module_init(devgroup_mt_init);
module_exit(devgroup_mt_exit);
| linux-master | net/netfilter/xt_devgroup.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
static void
flow_offload_fill_dir(struct flow_offload *flow,
enum flow_offload_tuple_dir dir)
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;
ft->dir = dir;
switch (ctt->src.l3num) {
case NFPROTO_IPV4:
ft->src_v4 = ctt->src.u3.in;
ft->dst_v4 = ctt->dst.u3.in;
break;
case NFPROTO_IPV6:
ft->src_v6 = ctt->src.u3.in6;
ft->dst_v6 = ctt->dst.u3.in6;
break;
}
ft->l3proto = ctt->src.l3num;
ft->l4proto = ctt->dst.protonum;
switch (ctt->dst.protonum) {
case IPPROTO_TCP:
case IPPROTO_UDP:
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;
break;
}
}
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
struct flow_offload *flow;
if (unlikely(nf_ct_is_dying(ct)))
return NULL;
flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
if (!flow)
return NULL;
refcount_inc(&ct->ct_general.use);
flow->ct = ct;
flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);
if (ct->status & IPS_SRC_NAT)
__set_bit(NF_FLOW_SNAT, &flow->flags);
if (ct->status & IPS_DST_NAT)
__set_bit(NF_FLOW_DNAT, &flow->flags);
return flow;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
const struct rt6_info *rt;
if (flow_tuple->l3proto == NFPROTO_IPV6) {
rt = (const struct rt6_info *)flow_tuple->dst_cache;
return rt6_get_cookie(rt);
}
return 0;
}
static int flow_offload_fill_route(struct flow_offload *flow,
const struct nf_flow_route *route,
enum flow_offload_tuple_dir dir)
{
struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
struct dst_entry *dst = route->tuple[dir].dst;
int i, j = 0;
switch (flow_tuple->l3proto) {
case NFPROTO_IPV4:
flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
break;
case NFPROTO_IPV6:
flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
break;
}
flow_tuple->iifidx = route->tuple[dir].in.ifindex;
for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
if (route->tuple[dir].in.ingress_vlans & BIT(i))
flow_tuple->in_vlan_ingress |= BIT(j);
j++;
}
flow_tuple->encap_num = route->tuple[dir].in.num_encaps;
switch (route->tuple[dir].xmit_type) {
case FLOW_OFFLOAD_XMIT_DIRECT:
memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
ETH_ALEN);
memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
ETH_ALEN);
flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
break;
case FLOW_OFFLOAD_XMIT_XFRM:
case FLOW_OFFLOAD_XMIT_NEIGH:
flow_tuple->dst_cache = dst;
flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
break;
default:
WARN_ON_ONCE(1);
break;
}
flow_tuple->xmit_type = route->tuple[dir].xmit_type;
return 0;
}
static void nft_flow_dst_release(struct flow_offload *flow,
enum flow_offload_tuple_dir dir)
{
if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
dst_release(flow->tuplehash[dir].tuple.dst_cache);
}
void flow_offload_route_init(struct flow_offload *flow,
const struct nf_flow_route *route)
{
flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
flow->type = NF_FLOW_OFFLOAD_ROUTE;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);
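/* When a flow falls back to the slow path, rewind the conntrack timeout
 * from the long offload timeout to the regular per-state value and reset
 * the cached TCP window tracking so it is relearned.
 */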
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
tcp->seen[0].td_maxwin = 0;
tcp->seen[1].td_maxwin = 0;
}
static void flow_offload_fixup_ct(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
int l4num = nf_ct_protonum(ct);
s32 timeout;
if (l4num == IPPROTO_TCP) {
struct nf_tcp_net *tn = nf_tcp_pernet(net);
flow_offload_fixup_tcp(&ct->proto.tcp);
timeout = tn->timeouts[ct->proto.tcp.state];
timeout -= tn->offload_timeout;
} else if (l4num == IPPROTO_UDP) {
struct nf_udp_net *tn = nf_udp_pernet(net);
enum udp_conntrack state =
test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
UDP_CT_REPLIED : UDP_CT_UNREPLIED;
timeout = tn->timeouts[state];
timeout -= tn->offload_timeout;
} else {
return;
}
if (timeout < 0)
timeout = 0;
if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}
static void flow_offload_route_release(struct flow_offload *flow)
{
nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}
void flow_offload_free(struct flow_offload *flow)
{
switch (flow->type) {
case NF_FLOW_OFFLOAD_ROUTE:
flow_offload_route_release(flow);
break;
default:
break;
}
nf_ct_put(flow->ct);
kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
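/* Tuples are hashed and compared only up to the __hash marker, so lookups
 * ignore the routing/output fields stored after it.
 */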
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
const struct flow_offload_tuple *tuple = data;
return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}
static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
const struct flow_offload_tuple_rhash *tuplehash = data;
return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}
static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct flow_offload_tuple *tuple = arg->key;
const struct flow_offload_tuple_rhash *x = ptr;
if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
return 1;
return 0;
}
static const struct rhashtable_params nf_flow_offload_rhash_params = {
.head_offset = offsetof(struct flow_offload_tuple_rhash, node),
.hashfn = flow_offload_hash,
.obj_hashfn = flow_offload_hash_obj,
.obj_cmpfn = flow_offload_hash_cmp,
.automatic_shrinking = true,
};
unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
unsigned long timeout = NF_FLOW_TIMEOUT;
struct net *net = nf_ct_net(flow->ct);
int l4num = nf_ct_protonum(flow->ct);
if (l4num == IPPROTO_TCP) {
struct nf_tcp_net *tn = nf_tcp_pernet(net);
timeout = tn->offload_timeout;
} else if (l4num == IPPROTO_UDP) {
struct nf_udp_net *tn = nf_udp_pernet(net);
timeout = tn->offload_timeout;
}
return timeout;
}
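/* Insert both tuple directions into the flow table; if the reply direction
 * fails to insert, the original direction is backed out so the table never
 * contains a half-inserted flow.
 */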
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
int err;
flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
err = rhashtable_insert_fast(&flow_table->rhashtable,
&flow->tuplehash[0].node,
nf_flow_offload_rhash_params);
if (err < 0)
return err;
err = rhashtable_insert_fast(&flow_table->rhashtable,
&flow->tuplehash[1].node,
nf_flow_offload_rhash_params);
if (err < 0) {
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[0].node,
nf_flow_offload_rhash_params);
return err;
}
nf_ct_offload_timeout(flow->ct);
if (nf_flowtable_hw_offload(flow_table)) {
__set_bit(NF_FLOW_HW, &flow->flags);
nf_flow_offload_add(flow_table, flow);
}
return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
void flow_offload_refresh(struct nf_flowtable *flow_table,
struct flow_offload *flow, bool force)
{
u32 timeout;
timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
if (force || timeout - READ_ONCE(flow->timeout) > HZ)
WRITE_ONCE(flow->timeout, timeout);
else
return;
if (likely(!nf_flowtable_hw_offload(flow_table)))
return;
nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
static bool nf_flow_is_outdated(const struct flow_offload *flow)
{
return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
!test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return nf_flow_timeout_delta(flow->timeout) <= 0;
}
static void flow_offload_del(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
nf_flow_offload_rhash_params);
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
nf_flow_offload_rhash_params);
flow_offload_free(flow);
}
void flow_offload_teardown(struct flow_offload *flow)
{
clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
set_bit(NF_FLOW_TEARDOWN, &flow->flags);
flow_offload_fixup_ct(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple)
{
struct flow_offload_tuple_rhash *tuplehash;
struct flow_offload *flow;
int dir;
tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
nf_flow_offload_rhash_params);
if (!tuplehash)
return NULL;
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
return NULL;
if (unlikely(nf_ct_is_dying(flow->ct)))
return NULL;
return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
void (*iter)(struct nf_flowtable *flowtable,
struct flow_offload *flow, void *data),
void *data)
{
struct flow_offload_tuple_rhash *tuplehash;
struct rhashtable_iter hti;
struct flow_offload *flow;
int err = 0;
rhashtable_walk_enter(&flow_table->rhashtable, &hti);
rhashtable_walk_start(&hti);
while ((tuplehash = rhashtable_walk_next(&hti))) {
if (IS_ERR(tuplehash)) {
if (PTR_ERR(tuplehash) != -EAGAIN) {
err = PTR_ERR(tuplehash);
break;
}
continue;
}
if (tuplehash->tuple.dir)
continue;
flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
iter(flow_table, flow, data);
}
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
return err;
}
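/* Periodic GC step: tear down expired, dying or outdated flows, then drive
 * the hardware offload state machine (HW_DYING -> HW_DEAD) before the
 * entry may be removed from the table.
 */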
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) ||
nf_flow_is_outdated(flow))
flow_offload_teardown(flow);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
if (test_bit(NF_FLOW_HW, &flow->flags)) {
if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
nf_flow_offload_del(flow_table, flow);
else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
flow_offload_del(flow_table, flow);
} else {
flow_offload_del(flow_table, flow);
}
} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
nf_flow_offload_stats(flow_table, flow);
}
}
void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
{
nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
}
static void nf_flow_offload_work_gc(struct work_struct *work)
{
struct nf_flowtable *flow_table;
flow_table = container_of(work, struct nf_flowtable, gc_work.work);
nf_flow_table_gc_run(flow_table);
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
__be16 port, __be16 new_port)
{
struct tcphdr *tcph;
tcph = (void *)(skb_network_header(skb) + thoff);
inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}
static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
__be16 port, __be16 new_port)
{
struct udphdr *udph;
udph = (void *)(skb_network_header(skb) + thoff);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace2(&udph->check, skb, port,
new_port, false);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}
}
static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
u8 protocol, __be16 port, __be16 new_port)
{
switch (protocol) {
case IPPROTO_TCP:
nf_flow_nat_port_tcp(skb, thoff, port, new_port);
break;
case IPPROTO_UDP:
nf_flow_nat_port_udp(skb, thoff, port, new_port);
break;
}
}
void nf_flow_snat_port(const struct flow_offload *flow,
struct sk_buff *skb, unsigned int thoff,
u8 protocol, enum flow_offload_tuple_dir dir)
{
struct flow_ports *hdr;
__be16 port, new_port;
hdr = (void *)(skb_network_header(skb) + thoff);
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = hdr->source;
new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
hdr->source = new_port;
break;
case FLOW_OFFLOAD_DIR_REPLY:
port = hdr->dest;
new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
hdr->dest = new_port;
break;
}
nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);
void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
unsigned int thoff, u8 protocol,
enum flow_offload_tuple_dir dir)
{
struct flow_ports *hdr;
__be16 port, new_port;
hdr = (void *)(skb_network_header(skb) + thoff);
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = hdr->dest;
new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
hdr->dest = new_port;
break;
case FLOW_OFFLOAD_DIR_REPLY:
port = hdr->source;
new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
hdr->source = new_port;
break;
}
nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
int err;
INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
flow_block_init(&flowtable->flow_block);
init_rwsem(&flowtable->flow_block_lock);
err = rhashtable_init(&flowtable->rhashtable,
&nf_flow_offload_rhash_params);
if (err < 0)
return err;
queue_delayed_work(system_power_efficient_wq,
&flowtable->gc_work, HZ);
mutex_lock(&flowtable_lock);
list_add(&flowtable->list, &flowtables);
mutex_unlock(&flowtable_lock);
return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);
static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
struct net_device *dev = data;
if (!dev) {
flow_offload_teardown(flow);
return;
}
if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
(flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
flow->tuplehash[1].tuple.iifidx == dev->ifindex))
flow_offload_teardown(flow);
}
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
struct net_device *dev)
{
nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
flush_delayed_work(&flowtable->gc_work);
nf_flow_table_offload_flush(flowtable);
}
void nf_flow_table_cleanup(struct net_device *dev)
{
struct nf_flowtable *flowtable;
mutex_lock(&flowtable_lock);
list_for_each_entry(flowtable, &flowtables, list)
nf_flow_table_gc_cleanup(flowtable, dev);
mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
mutex_lock(&flowtable_lock);
list_del(&flow_table->list);
mutex_unlock(&flowtable_lock);
cancel_delayed_work_sync(&flow_table->gc_work);
nf_flow_table_offload_flush(flow_table);
/* ... no more pending work after this stage ... */
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
nf_flow_table_gc_run(flow_table);
nf_flow_table_offload_flush_cleanup(flow_table);
rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
static int nf_flow_table_init_net(struct net *net)
{
net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
return net->ft.stat ? 0 : -ENOMEM;
}
static void nf_flow_table_fini_net(struct net *net)
{
free_percpu(net->ft.stat);
}
static int nf_flow_table_pernet_init(struct net *net)
{
int ret;
ret = nf_flow_table_init_net(net);
if (ret < 0)
return ret;
ret = nf_flow_table_init_proc(net);
if (ret < 0)
goto out_proc;
return 0;
out_proc:
nf_flow_table_fini_net(net);
return ret;
}
static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
{
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list) {
nf_flow_table_fini_proc(net);
nf_flow_table_fini_net(net);
}
}
static struct pernet_operations nf_flow_table_net_ops = {
.init = nf_flow_table_pernet_init,
.exit_batch = nf_flow_table_pernet_exit,
};
static int __init nf_flow_table_module_init(void)
{
int ret;
ret = register_pernet_subsys(&nf_flow_table_net_ops);
if (ret < 0)
return ret;
ret = nf_flow_table_offload_init();
if (ret)
goto out_offload;
return 0;
out_offload:
unregister_pernet_subsys(&nf_flow_table_net_ops);
return ret;
}
static void __exit nf_flow_table_module_exit(void)
{
nf_flow_table_offload_exit();
unregister_pernet_subsys(&nf_flow_table_net_ops);
}
module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_DESCRIPTION("Netfilter flow table module");
| linux-master | net/netfilter/nf_flow_table_core.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>
#include <net/route.h>
#include <net/ip.h>
#ifdef CONFIG_NF_TABLES_IPV4
static unsigned int nf_route_table_hook4(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
const struct iphdr *iph;
struct nft_pktinfo pkt;
__be32 saddr, daddr;
unsigned int ret;
u32 mark;
int err;
u8 tos;
nft_set_pktinfo(&pkt, skb, state);
nft_set_pktinfo_ipv4(&pkt);
mark = skb->mark;
iph = ip_hdr(skb);
saddr = iph->saddr;
daddr = iph->daddr;
tos = iph->tos;
ret = nft_do_chain(&pkt, priv);
if (ret == NF_ACCEPT) {
iph = ip_hdr(skb);
if (iph->saddr != saddr ||
iph->daddr != daddr ||
skb->mark != mark ||
iph->tos != tos) {
err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
if (err < 0)
ret = NF_DROP_ERR(err);
}
}
return ret;
}
static const struct nft_chain_type nft_chain_route_ipv4 = {
.name = "route",
.type = NFT_CHAIN_T_ROUTE,
.family = NFPROTO_IPV4,
.hook_mask = (1 << NF_INET_LOCAL_OUT),
.hooks = {
[NF_INET_LOCAL_OUT] = nf_route_table_hook4,
},
};
#endif
#ifdef CONFIG_NF_TABLES_IPV6
static unsigned int nf_route_table_hook6(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct in6_addr saddr, daddr;
struct nft_pktinfo pkt;
u32 mark, flowlabel;
unsigned int ret;
u8 hop_limit;
int err;
nft_set_pktinfo(&pkt, skb, state);
nft_set_pktinfo_ipv6(&pkt);
/* save source/dest address, mark, hoplimit, flowlabel, priority */
memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
mark = skb->mark;
hop_limit = ipv6_hdr(skb)->hop_limit;
/* flowlabel and prio (includes version, which shouldn't change either) */
flowlabel = *((u32 *)ipv6_hdr(skb));
ret = nft_do_chain(&pkt, priv);
if (ret == NF_ACCEPT &&
(memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
skb->mark != mark ||
ipv6_hdr(skb)->hop_limit != hop_limit ||
flowlabel != *((u32 *)ipv6_hdr(skb)))) {
err = nf_ip6_route_me_harder(state->net, state->sk, skb);
if (err < 0)
ret = NF_DROP_ERR(err);
}
return ret;
}
static const struct nft_chain_type nft_chain_route_ipv6 = {
.name = "route",
.type = NFT_CHAIN_T_ROUTE,
.family = NFPROTO_IPV6,
.hook_mask = (1 << NF_INET_LOCAL_OUT),
.hooks = {
[NF_INET_LOCAL_OUT] = nf_route_table_hook6,
},
};
#endif
#ifdef CONFIG_NF_TABLES_INET
static unsigned int nf_route_table_inet(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
switch (state->pf) {
case NFPROTO_IPV4:
return nf_route_table_hook4(priv, skb, state);
case NFPROTO_IPV6:
return nf_route_table_hook6(priv, skb, state);
default:
nft_set_pktinfo(&pkt, skb, state);
break;
}
return nft_do_chain(&pkt, priv);
}
static const struct nft_chain_type nft_chain_route_inet = {
.name = "route",
.type = NFT_CHAIN_T_ROUTE,
.family = NFPROTO_INET,
.hook_mask = (1 << NF_INET_LOCAL_OUT),
.hooks = {
[NF_INET_LOCAL_OUT] = nf_route_table_inet,
},
};
#endif
void __init nft_chain_route_init(void)
{
#ifdef CONFIG_NF_TABLES_IPV6
nft_register_chain_type(&nft_chain_route_ipv6);
#endif
#ifdef CONFIG_NF_TABLES_IPV4
nft_register_chain_type(&nft_chain_route_ipv4);
#endif
#ifdef CONFIG_NF_TABLES_INET
nft_register_chain_type(&nft_chain_route_inet);
#endif
}
void __exit nft_chain_route_fini(void)
{
#ifdef CONFIG_NF_TABLES_IPV6
nft_unregister_chain_type(&nft_chain_route_ipv6);
#endif
#ifdef CONFIG_NF_TABLES_IPV4
nft_unregister_chain_type(&nft_chain_route_ipv4);
#endif
#ifdef CONFIG_NF_TABLES_INET
nft_unregister_chain_type(&nft_chain_route_inet);
#endif
}
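/* Usage sketch (assumed nft syntax, for illustration): a route-type
* chain re-runs the route lookup when a rule changed the addresses,
* mark, tos/hop limit or flow label of a locally generated packet,
* e.g.:
*
*   nft add table inet mangle
*   nft add chain inet mangle output \
*       '{ type route hook output priority mangle; }'
*
* Route chains only attach to the output hook, matching the hook_mask
* in the chain types above.
*/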
| linux-master | net/netfilter/nft_chain_route.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#define nft_objref_priv(expr) *((struct nft_object **)nft_expr_priv(expr))
void nft_objref_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_object *obj = nft_objref_priv(expr);
obj->ops->eval(obj, regs, pkt);
}
static int nft_objref_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_object *obj = nft_objref_priv(expr);
u8 genmask = nft_genmask_next(ctx->net);
u32 objtype;
if (!tb[NFTA_OBJREF_IMM_NAME] ||
!tb[NFTA_OBJREF_IMM_TYPE])
return -EINVAL;
objtype = ntohl(nla_get_be32(tb[NFTA_OBJREF_IMM_TYPE]));
obj = nft_obj_lookup(ctx->net, ctx->table,
tb[NFTA_OBJREF_IMM_NAME], objtype,
genmask);
if (IS_ERR(obj))
return -ENOENT;
if (!nft_use_inc(&obj->use))
return -EMFILE;
nft_objref_priv(expr) = obj;
return 0;
}
static int nft_objref_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_object *obj = nft_objref_priv(expr);
if (nla_put_string(skb, NFTA_OBJREF_IMM_NAME, obj->key.name) ||
nla_put_be32(skb, NFTA_OBJREF_IMM_TYPE,
htonl(obj->ops->type->type)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static void nft_objref_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_object *obj = nft_objref_priv(expr);
if (phase == NFT_TRANS_COMMIT)
return;
nft_use_dec(&obj->use);
}
static void nft_objref_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_object *obj = nft_objref_priv(expr);
nft_use_inc_restore(&obj->use);
}
static const struct nft_expr_ops nft_objref_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
.eval = nft_objref_eval,
.init = nft_objref_init,
.activate = nft_objref_activate,
.deactivate = nft_objref_deactivate,
.dump = nft_objref_dump,
.reduce = NFT_REDUCE_READONLY,
};
struct nft_objref_map {
struct nft_set *set;
u8 sreg;
struct nft_set_binding binding;
};
void nft_objref_map_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_objref_map *priv = nft_expr_priv(expr);
const struct nft_set *set = priv->set;
struct net *net = nft_net(pkt);
const struct nft_set_ext *ext;
struct nft_object *obj;
bool found;
found = nft_set_do_lookup(net, set, ®s->data[priv->sreg], &ext);
if (!found) {
ext = nft_set_catchall_lookup(net, set);
if (!ext) {
regs->verdict.code = NFT_BREAK;
return;
}
}
obj = *nft_set_ext_obj(ext);
obj->ops->eval(obj, regs, pkt);
}
static int nft_objref_map_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_objref_map *priv = nft_expr_priv(expr);
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set *set;
int err;
set = nft_set_lookup_global(ctx->net, ctx->table,
tb[NFTA_OBJREF_SET_NAME],
tb[NFTA_OBJREF_SET_ID], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
if (!(set->flags & NFT_SET_OBJECT))
return -EINVAL;
err = nft_parse_register_load(tb[NFTA_OBJREF_SET_SREG], &priv->sreg,
set->klen);
if (err < 0)
return err;
priv->binding.flags = set->flags & NFT_SET_OBJECT;
err = nf_tables_bind_set(ctx, set, &priv->binding);
if (err < 0)
return err;
priv->set = set;
return 0;
}
static int nft_objref_map_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_objref_map *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_OBJREF_SET_SREG, priv->sreg) ||
nla_put_string(skb, NFTA_OBJREF_SET_NAME, priv->set->name))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_objref_map *priv = nft_expr_priv(expr);
nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
}
static void nft_objref_map_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_objref_map *priv = nft_expr_priv(expr);
nf_tables_activate_set(ctx, priv->set);
}
static void nft_objref_map_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_objref_map *priv = nft_expr_priv(expr);
nf_tables_destroy_set(ctx, priv->set);
}
static const struct nft_expr_ops nft_objref_map_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
.eval = nft_objref_map_eval,
.init = nft_objref_map_init,
.activate = nft_objref_map_activate,
.deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
.reduce = NFT_REDUCE_READONLY,
};
static const struct nft_expr_ops *
nft_objref_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_OBJREF_SET_SREG] &&
(tb[NFTA_OBJREF_SET_NAME] ||
tb[NFTA_OBJREF_SET_ID]))
return &nft_objref_map_ops;
else if (tb[NFTA_OBJREF_IMM_NAME] &&
tb[NFTA_OBJREF_IMM_TYPE])
return &nft_objref_ops;
return ERR_PTR(-EOPNOTSUPP);
}
static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
[NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING,
.len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 },
[NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 },
[NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_OBJREF_SET_ID] = { .type = NLA_U32 },
};
struct nft_expr_type nft_objref_type __read_mostly = {
.name = "objref",
.select_ops = nft_objref_select_ops,
.policy = nft_objref_policy,
.maxattr = NFTA_OBJREF_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_objref.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
* (C) 2011 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/siphash.h>
#include <linux/rtnetlink.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <uapi/linux/netfilter/nf_nat.h>
#include "nf_internals.h"
#define NF_NAT_MAX_ATTEMPTS 128
#define NF_NAT_HARDER_THRESH (NF_NAT_MAX_ATTEMPTS / 4)
static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];
static DEFINE_MUTEX(nf_nat_proto_mutex);
static unsigned int nat_net_id __read_mostly;
static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static siphash_aligned_key_t nf_nat_hash_rnd;
struct nf_nat_lookup_hook_priv {
struct nf_hook_entries __rcu *entries;
struct rcu_head rcu_head;
};
struct nf_nat_hooks_net {
struct nf_hook_ops *nat_hook_ops;
unsigned int users;
};
struct nat_net {
struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};
#ifdef CONFIG_XFRM
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
const struct nf_conn *ct,
enum ip_conntrack_dir dir,
unsigned long statusbit,
struct flowi *fl)
{
const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
struct flowi4 *fl4 = &fl->u.ip4;
if (ct->status & statusbit) {
fl4->daddr = t->dst.u3.ip;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl4->fl4_dport = t->dst.u.all;
}
statusbit ^= IPS_NAT_MASK;
if (ct->status & statusbit) {
fl4->saddr = t->src.u3.ip;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl4->fl4_sport = t->src.u.all;
}
}
static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
const struct nf_conn *ct,
enum ip_conntrack_dir dir,
unsigned long statusbit,
struct flowi *fl)
{
#if IS_ENABLED(CONFIG_IPV6)
const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
struct flowi6 *fl6 = &fl->u.ip6;
if (ct->status & statusbit) {
fl6->daddr = t->dst.u3.in6;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl6->fl6_dport = t->dst.u.all;
}
statusbit ^= IPS_NAT_MASK;
if (ct->status & statusbit) {
fl6->saddr = t->src.u3.in6;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl6->fl6_sport = t->src.u.all;
}
#endif
}
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
enum ip_conntrack_dir dir;
unsigned long statusbit;
u8 family;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return;
family = nf_ct_l3num(ct);
dir = CTINFO2DIR(ctinfo);
if (dir == IP_CT_DIR_ORIGINAL)
statusbit = IPS_DST_NAT;
else
statusbit = IPS_SRC_NAT;
switch (family) {
case NFPROTO_IPV4:
nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
return;
case NFPROTO_IPV6:
nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
return;
}
}
#endif /* CONFIG_XFRM */
/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
struct {
struct nf_conntrack_man src;
u32 net_mix;
u32 protonum;
u32 zone;
} __aligned(SIPHASH_ALIGNMENT) combined;
get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
memset(&combined, 0, sizeof(combined));
/* Original src, to ensure we map it consistently if possible. */
combined.src = tuple->src;
combined.net_mix = net_hash_mix(net);
combined.protonum = tuple->dst.protonum;
/* Zone ID can be used provided it's valid for both directions */
if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)
combined.zone = zone->id;
hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);
return reciprocal_scale(hash, nf_nat_htable_size);
}
/* Is this tuple already taken? (not by us) */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
/* Conntrack tracking doesn't keep track of outgoing tuples; only
* incoming ones. NAT means they don't have a fixed mapping,
* so we invert the tuple and look for the incoming reply.
*
* We could keep a separate hash if this proves too slow.
*/
struct nf_conntrack_tuple reply;
nf_ct_invert_tuple(&reply, tuple);
return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
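/* Worked example (hypothetical tuple, for illustration): for a
* candidate SNAT tuple src 192.0.2.1:40001 -> dst 8.8.8.8:53, the
* inverted "reply" is src 8.8.8.8:53 -> dst 192.0.2.1:40001; if some
* other conntrack already expects that reply, the candidate mapping
* would collide and must not be used.
*/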
static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags)
{
static const unsigned long flags_refuse = IPS_FIXED_TIMEOUT |
IPS_DYING;
static const unsigned long flags_needed = IPS_SRC_NAT;
enum tcp_conntrack old_state;
old_state = READ_ONCE(ct->proto.tcp.state);
if (old_state < TCP_CONNTRACK_TIME_WAIT)
return false;
if (flags & flags_refuse)
return false;
return (flags & flags_needed) == flags_needed;
}
/* reverse direction will send packets to new source, so
* make sure such packets are invalid.
*/
static bool nf_seq_has_advanced(const struct nf_conn *old, const struct nf_conn *new)
{
return (__s32)(new->proto.tcp.seen[0].td_end -
old->proto.tcp.seen[0].td_end) > 0;
}
static int
nf_nat_used_tuple_harder(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack,
unsigned int attempts_left)
{
static const unsigned long flags_offload = IPS_OFFLOAD | IPS_HW_OFFLOAD;
struct nf_conntrack_tuple_hash *thash;
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple reply;
unsigned long flags;
struct nf_conn *ct;
bool taken = true;
struct net *net;
nf_ct_invert_tuple(&reply, tuple);
if (attempts_left > NF_NAT_HARDER_THRESH ||
tuple->dst.protonum != IPPROTO_TCP ||
ignored_conntrack->proto.tcp.state != TCP_CONNTRACK_SYN_SENT)
return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
/* Last few attempts to find a free tcp port. Destructive
* action: evict the colliding entry if it is in timewait state and
* the tcp sequence number has advanced past the one used by the
* old entry.
*/
net = nf_ct_net(ignored_conntrack);
zone = nf_ct_zone(ignored_conntrack);
thash = nf_conntrack_find_get(net, zone, &reply);
if (!thash)
return false;
ct = nf_ct_tuplehash_to_ctrack(thash);
if (thash->tuple.dst.dir == IP_CT_DIR_ORIGINAL)
goto out;
if (WARN_ON_ONCE(ct == ignored_conntrack))
goto out;
flags = READ_ONCE(ct->status);
if (!nf_nat_may_kill(ct, flags))
goto out;
if (!nf_seq_has_advanced(ct, ignored_conntrack))
goto out;
/* Even if we can evict do not reuse if entry is offloaded. */
if (nf_ct_kill(ct))
taken = flags & flags_offload;
out:
nf_ct_put(ct);
return taken;
}
static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
const struct nf_nat_range2 *range)
{
if (t->src.l3num == NFPROTO_IPV4)
return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}
/* Is the manipulable part of the tuple between min and max inclusive? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 port;
switch (tuple->dst.protonum) {
case IPPROTO_ICMP:
case IPPROTO_ICMPV6:
return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
case IPPROTO_GRE: /* all fall through */
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_DCCP:
case IPPROTO_SCTP:
if (maniptype == NF_NAT_MANIP_SRC)
port = tuple->src.u.all;
else
port = tuple->dst.u.all;
return ntohs(port) >= ntohs(min->all) &&
ntohs(port) <= ntohs(max->all);
default:
return true;
}
}
/* If we source map this tuple so reply looks like reply_tuple, will
* that meet the constraints of range.
*/
static int nf_in_range(const struct nf_conntrack_tuple *tuple,
const struct nf_nat_range2 *range)
{
/* If we are supposed to map IPs, then we must be in the
* range specified, otherwise let this drag us onto a new src IP.
*/
if (range->flags & NF_NAT_RANGE_MAP_IPS &&
!nf_nat_inet_in_range(tuple, range))
return 0;
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
return 1;
return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
&range->min_proto, &range->max_proto);
}
static inline int
same_src(const struct nf_conn *ct,
const struct nf_conntrack_tuple *tuple)
{
const struct nf_conntrack_tuple *t;
t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
return (t->dst.protonum == tuple->dst.protonum &&
nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
t->src.u.all == tuple->src.u.all);
}
/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *result,
const struct nf_nat_range2 *range)
{
unsigned int h = hash_by_src(net, zone, tuple);
const struct nf_conn *ct;
hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
if (same_src(ct, tuple) &&
net_eq(net, nf_ct_net(ct)) &&
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
/* Copy source part from reply tuple. */
nf_ct_invert_tuple(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
result->dst = tuple->dst;
if (nf_in_range(result, range))
return 1;
}
}
return 0;
}
/* For [FUTURE] fragmentation handling, we want the least-used
* src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus
* if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
* 1-65535, we don't do pro-rata allocation based on ports; we choose
* the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
struct nf_conntrack_tuple *tuple,
const struct nf_nat_range2 *range,
const struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
union nf_inet_addr *var_ipp;
unsigned int i, max;
/* Host order */
u32 minip, maxip, j, dist;
bool full_range;
/* No IP mapping? Do nothing. */
if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
return;
if (maniptype == NF_NAT_MANIP_SRC)
var_ipp = &tuple->src.u3;
else
var_ipp = &tuple->dst.u3;
/* Fast path: only one choice. */
if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
*var_ipp = range->min_addr;
return;
}
if (nf_ct_l3num(ct) == NFPROTO_IPV4)
max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
else
max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;
/* Hashing source and destination IPs gives a fairly even
* spread in practice (if there are a small number of IPs
* involved, there usually aren't that many connections
* anyway). The consistency means that servers see the same
* client coming from the same IP (some Internet Banking sites
* like this), even across reboots.
*/
j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
range->flags & NF_NAT_RANGE_PERSISTENT ?
0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
full_range = false;
for (i = 0; i <= max; i++) {
/* If first bytes of the address are at the maximum, use the
* distance. Otherwise use the full range.
*/
if (!full_range) {
minip = ntohl((__force __be32)range->min_addr.all[i]);
maxip = ntohl((__force __be32)range->max_addr.all[i]);
dist = maxip - minip + 1;
} else {
minip = 0;
dist = ~0;
}
var_ipp->all[i] = (__force __u32)
htonl(minip + reciprocal_scale(j, dist));
if (var_ipp->all[i] != range->max_addr.all[i])
full_range = true;
if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
j ^= (__force u32)tuple->dst.u3.all[i];
}
}
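/* Worked example (hypothetical range, for illustration): for an IPv4
* range 198.51.100.10 - 198.51.100.19, dist is 10 and the address
* becomes htonl(minip + reciprocal_scale(j, 10)), i.e. one of the ten
* candidates; because j is derived from a hash of the tuple, the same
* client keeps mapping to the same address across connections.
*/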
/* Alter the per-proto part of the tuple (depending on maniptype), to
* give a unique tuple in the given range if possible.
*
* Per-protocol part of tuple is initialized to the incoming packet.
*/
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
unsigned int range_size, min, max, i, attempts;
__be16 *keyptr;
u16 off;
switch (tuple->dst.protonum) {
case IPPROTO_ICMP:
case IPPROTO_ICMPV6:
/* id is same for either direction... */
keyptr = &tuple->src.u.icmp.id;
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
min = 0;
range_size = 65536;
} else {
min = ntohs(range->min_proto.icmp.id);
range_size = ntohs(range->max_proto.icmp.id) -
ntohs(range->min_proto.icmp.id) + 1;
}
goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
case IPPROTO_GRE:
/* If there is no master conntrack we are not PPTP,
* do not change tuples */
if (!ct->master)
return;
if (maniptype == NF_NAT_MANIP_SRC)
keyptr = &tuple->src.u.gre.key;
else
keyptr = &tuple->dst.u.gre.key;
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
min = 1;
range_size = 65535;
} else {
min = ntohs(range->min_proto.gre.key);
range_size = ntohs(range->max_proto.gre.key) - min + 1;
}
goto find_free_id;
#endif
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_TCP:
case IPPROTO_SCTP:
case IPPROTO_DCCP:
if (maniptype == NF_NAT_MANIP_SRC)
keyptr = &tuple->src.u.all;
else
keyptr = &tuple->dst.u.all;
break;
default:
return;
}
/* If no range specified... */
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
/* If it's dst rewrite, can't change port */
if (maniptype == NF_NAT_MANIP_DST)
return;
if (ntohs(*keyptr) < 1024) {
/* Loose convention: >> 512 is credential passing */
if (ntohs(*keyptr) < 512) {
min = 1;
range_size = 511 - min + 1;
} else {
min = 600;
range_size = 1023 - min + 1;
}
} else {
min = 1024;
range_size = 65535 - 1024 + 1;
}
} else {
min = ntohs(range->min_proto.all);
max = ntohs(range->max_proto.all);
if (unlikely(max < min))
swap(max, min);
range_size = max - min + 1;
}
find_free_id:
if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
else
off = get_random_u16();
attempts = range_size;
if (attempts > NF_NAT_MAX_ATTEMPTS)
attempts = NF_NAT_MAX_ATTEMPTS;
/* We are in softirq; doing a search of the entire range risks
* soft lockup when all tuples are already used.
*
* If we can't find any free port from first offset, pick a new
* one and try again, with ever smaller search window.
*/
another_round:
for (i = 0; i < attempts; i++, off++) {
*keyptr = htons(min + off % range_size);
if (!nf_nat_used_tuple_harder(tuple, ct, attempts - i))
return;
}
if (attempts >= range_size || attempts < 16)
return;
attempts /= 2;
off = get_random_u16();
goto another_round;
}
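/* Worked example (hypothetical port, for illustration): with no
* explicit range, a source port of 33000 falls in the >= 1024 band,
* so min = 1024 and range_size = 64512. A random 16-bit offset seeds
* a scan of at most NF_NAT_MAX_ATTEMPTS (128) consecutive slots; on
* failure the window halves with a fresh random offset each round
* (128 -> 64 -> 32 -> 16 -> 8) before giving up and leaving the final
* clash to be resolved at conntrack confirmation time.
*/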
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
* we change the source to map into the range. For NF_INET_PRE_ROUTING
* and NF_INET_LOCAL_OUT, we change the destination to map into the
* range. It might not be possible to get a unique tuple, but we try.
* At worst (or if we race), we will end up with a final duplicate in
* __nf_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig_tuple,
const struct nf_nat_range2 *range,
struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
const struct nf_conntrack_zone *zone;
struct net *net = nf_ct_net(ct);
zone = nf_ct_zone(ct);
/* 1) If this srcip/proto/src-proto-part is currently mapped,
* and that same mapping gives a unique tuple within the given
* range, use that.
*
* This is only required for source (ie. NAT/masq) mappings.
* So far, we don't do local source mappings, so multiple
* manips not an issue.
*/
if (maniptype == NF_NAT_MANIP_SRC &&
!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
/* try the original tuple first */
if (nf_in_range(orig_tuple, range)) {
if (!nf_nat_used_tuple(orig_tuple, ct)) {
*tuple = *orig_tuple;
return;
}
} else if (find_appropriate_src(net, zone,
orig_tuple, tuple, range)) {
pr_debug("get_unique_tuple: Found current src map\n");
if (!nf_nat_used_tuple(tuple, ct))
return;
}
}
/* 2) Select the least-used IP/proto combination in the given range */
*tuple = *orig_tuple;
find_best_ips_proto(zone, tuple, range, ct, maniptype);
/* 3) The per-protocol part of the manip is made to map into
* the range to make a unique tuple.
*/
/* Only bother mapping if it's not already in range and unique */
if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
l4proto_in_range(tuple, maniptype,
&range->min_proto,
&range->max_proto) &&
(range->min_proto.all == range->max_proto.all ||
!nf_nat_used_tuple(tuple, ct)))
return;
} else if (!nf_nat_used_tuple(tuple, ct)) {
return;
}
}
/* Last chance: get protocol to try to obtain unique tuple. */
nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
struct nf_conn_nat *nat = nfct_nat(ct);
if (nat)
return nat;
if (!nf_ct_is_confirmed(ct))
nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple curr_tuple, new_tuple;
/* Can't setup nat info for confirmed ct. */
if (nf_ct_is_confirmed(ct))
return NF_ACCEPT;
WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
maniptype != NF_NAT_MANIP_DST);
if (WARN_ON(nf_nat_initialized(ct, maniptype)))
return NF_DROP;
/* What we've got will look like inverse of reply. Normally
* this is what is in the conntrack, except for prior
* manipulations (future optimization: if num_manips == 0,
* orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
*/
nf_ct_invert_tuple(&curr_tuple,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);
if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
struct nf_conntrack_tuple reply;
/* Alter conntrack table so will recognize replies. */
nf_ct_invert_tuple(&reply, &new_tuple);
nf_conntrack_alter_reply(ct, &reply);
/* Non-atomic: we own this at the moment. */
if (maniptype == NF_NAT_MANIP_SRC)
ct->status |= IPS_SRC_NAT;
else
ct->status |= IPS_DST_NAT;
if (nfct_help(ct) && !nfct_seqadj(ct))
if (!nfct_seqadj_ext_add(ct))
return NF_DROP;
}
if (maniptype == NF_NAT_MANIP_SRC) {
unsigned int srchash;
spinlock_t *lock;
srchash = hash_by_src(net, nf_ct_zone(ct),
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
spin_lock_bh(lock);
hlist_add_head_rcu(&ct->nat_bysource,
&nf_nat_bysource[srchash]);
spin_unlock_bh(lock);
}
/* It's done. */
if (maniptype == NF_NAT_MANIP_DST)
ct->status |= IPS_DST_NAT_DONE;
else
ct->status |= IPS_SRC_NAT_DONE;
return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
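/* Caller sketch (a minimal illustration, not authoritative API
* documentation; snat_addr is a hypothetical union nf_inet_addr and
* the field usage mirrors __nf_nat_alloc_null_binding() below): an
* SNAT user pins both address bounds to a single IP and lets the
* l4proto logic pick a free port:
*
*	struct nf_nat_range2 range = {
*		.flags    = NF_NAT_RANGE_MAP_IPS,
*		.min_addr = snat_addr,
*		.max_addr = snat_addr,
*	};
*	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
*/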
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
/* Force range to this IP; let proto decide mapping for
* per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
* Use reply in case it's already been mangled (eg local packet).
*/
union nf_inet_addr ip =
(manip == NF_NAT_MANIP_SRC ?
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
struct nf_nat_range2 range = {
.flags = NF_NAT_RANGE_MAP_IPS,
.min_addr = ip,
.max_addr = ip,
};
return nf_nat_setup_info(ct, &range, manip);
}
unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum,
struct sk_buff *skb)
{
enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
unsigned int verdict = NF_ACCEPT;
unsigned long statusbit;
if (mtype == NF_NAT_MANIP_SRC)
statusbit = IPS_SRC_NAT;
else
statusbit = IPS_DST_NAT;
/* Invert if this is reply dir. */
if (dir == IP_CT_DIR_REPLY)
statusbit ^= IPS_NAT_MASK;
/* Non-atomic: these bits don't change. */
if (ct->status & statusbit)
verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);
return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
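/* Status-bit truth table (derived from the logic above): statusbit
* starts as IPS_SRC_NAT for a SRC manip and IPS_DST_NAT for a DST
* manip, and the XOR with IPS_NAT_MASK flips it for reply-direction
* packets:
*   SRC manip, ORIGINAL dir -> test IPS_SRC_NAT
*   SRC manip, REPLY dir    -> test IPS_DST_NAT
*   DST manip, ORIGINAL dir -> test IPS_DST_NAT
*   DST manip, REPLY dir    -> test IPS_SRC_NAT
*/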
static bool in_vrf_postrouting(const struct nf_hook_state *state)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
if (state->hook == NF_INET_POST_ROUTING &&
netif_is_l3_master(state->out))
return true;
#endif
return false;
}
unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
struct nf_conn_nat *nat;
/* maniptype == SRC for postrouting. */
enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
ct = nf_ct_get(skb, &ctinfo);
/* Can't track? It's not due to stress, or conntrack would
* have dropped it. Hence it's the user's responsibility to
* packet filter it out, or implement conntrack/NAT for that
* protocol. 8) --RR
*/
if (!ct || in_vrf_postrouting(state))
return NF_ACCEPT;
nat = nfct_nat(ct);
switch (ctinfo) {
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
/* Only ICMPs can be IP_CT_IS_REPLY. Fallthrough */
case IP_CT_NEW:
/* Seen it before? This can happen for loopback, retrans,
* or local packets.
*/
if (!nf_nat_initialized(ct, maniptype)) {
struct nf_nat_lookup_hook_priv *lpriv = priv;
struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
unsigned int ret;
int i;
if (!e)
goto null_bind;
for (i = 0; i < e->num_hook_entries; i++) {
ret = e->hooks[i].hook(e->hooks[i].priv, skb,
state);
if (ret != NF_ACCEPT)
return ret;
if (nf_nat_initialized(ct, maniptype))
goto do_nat;
}
null_bind:
ret = nf_nat_alloc_null_binding(ct, state->hook);
if (ret != NF_ACCEPT)
return ret;
} else {
pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n",
maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
ct, ct->status);
if (nf_nat_oif_changed(state->hook, ctinfo, nat,
state->out))
goto oif_changed;
}
break;
default:
/* ESTABLISHED */
WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED_REPLY);
if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
goto oif_changed;
}
do_nat:
return nf_nat_packet(ct, ctinfo, state->hook, skb);
oif_changed:
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);
struct nf_nat_proto_clean {
u8 l3proto;
u8 l4proto;
};
/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
const struct nf_nat_proto_clean *clean = data;
if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
(clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
return 0;
return i->status & IPS_NAT_MASK ? 1 : 0;
}
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
unsigned int h;
h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
hlist_del_rcu(&ct->nat_bysource);
spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}
static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
if (nf_nat_proto_remove(ct, data))
return 1;
/* This module is being removed and conntrack has nat null binding.
* Remove it from bysource hash, as the table will be freed soon.
*
* Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
* will delete entry from already-freed table.
*/
if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
nf_nat_cleanup_conntrack(ct);
/* don't delete conntrack. Although that would make things a lot
* simpler, we'd end up flushing all conntracks on nat rmmod.
*/
return 0;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
[CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
[CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};
static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range2 *range)
{
if (tb[CTA_PROTONAT_PORT_MIN]) {
range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
range->max_proto.all = range->min_proto.all;
range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
if (tb[CTA_PROTONAT_PORT_MAX]) {
range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
return 0;
}
static int nfnetlink_parse_nat_proto(struct nlattr *attr,
const struct nf_conn *ct,
struct nf_nat_range2 *range)
{
struct nlattr *tb[CTA_PROTONAT_MAX+1];
int err;
err = nla_parse_nested_deprecated(tb, CTA_PROTONAT_MAX, attr,
protonat_nla_policy, NULL);
if (err < 0)
return err;
return nf_nat_l4proto_nlattr_to_range(tb, range);
}
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
[CTA_NAT_V4_MINIP] = { .type = NLA_U32 },
[CTA_NAT_V4_MAXIP] = { .type = NLA_U32 },
[CTA_NAT_V6_MINIP] = { .len = sizeof(struct in6_addr) },
[CTA_NAT_V6_MAXIP] = { .len = sizeof(struct in6_addr) },
[CTA_NAT_PROTO] = { .type = NLA_NESTED },
};
static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range2 *range)
{
if (tb[CTA_NAT_V4_MINIP]) {
range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
range->flags |= NF_NAT_RANGE_MAP_IPS;
}
if (tb[CTA_NAT_V4_MAXIP])
range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
else
range->max_addr.ip = range->min_addr.ip;
return 0;
}
static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range2 *range)
{
if (tb[CTA_NAT_V6_MINIP]) {
nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
sizeof(struct in6_addr));
range->flags |= NF_NAT_RANGE_MAP_IPS;
}
if (tb[CTA_NAT_V6_MAXIP])
nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
sizeof(struct in6_addr));
else
range->max_addr = range->min_addr;
return 0;
}
static int
nfnetlink_parse_nat(const struct nlattr *nat,
const struct nf_conn *ct, struct nf_nat_range2 *range)
{
struct nlattr *tb[CTA_NAT_MAX+1];
int err;
memset(range, 0, sizeof(*range));
err = nla_parse_nested_deprecated(tb, CTA_NAT_MAX, nat,
nat_nla_policy, NULL);
if (err < 0)
return err;
switch (nf_ct_l3num(ct)) {
case NFPROTO_IPV4:
err = nf_nat_ipv4_nlattr_to_range(tb, range);
break;
case NFPROTO_IPV6:
err = nf_nat_ipv6_nlattr_to_range(tb, range);
break;
default:
err = -EPROTONOSUPPORT;
break;
}
if (err)
return err;
if (!tb[CTA_NAT_PROTO])
return 0;
return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}
/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
{
struct nf_nat_range2 range;
int err;
/* Should not happen, restricted to creating new conntracks
* via ctnetlink.
*/
if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
return -EEXIST;
/* No NAT information has been passed, allocate the null-binding */
if (attr == NULL)
return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;
err = nfnetlink_parse_nat(attr, ct, &range);
if (err < 0)
return err;
return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
{
return -EOPNOTSUPP;
}
#endif
static struct nf_ct_helper_expectfn follow_master_nat = {
.name = "nat-follow-master",
.expectfn = nf_nat_follow_master,
};
int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{
struct nat_net *nat_net = net_generic(net, nat_net_id);
struct nf_nat_hooks_net *nat_proto_net;
struct nf_nat_lookup_hook_priv *priv;
unsigned int hooknum = ops->hooknum;
struct nf_hook_ops *nat_ops;
int i, ret;
if (WARN_ON_ONCE(pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
return -EINVAL;
nat_proto_net = &nat_net->nat_proto_net[pf];
for (i = 0; i < ops_count; i++) {
if (orig_nat_ops[i].hooknum == hooknum) {
hooknum = i;
break;
}
}
if (WARN_ON_ONCE(i == ops_count))
return -EINVAL;
mutex_lock(&nf_nat_proto_mutex);
if (!nat_proto_net->nat_hook_ops) {
WARN_ON(nat_proto_net->users != 0);
nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
if (!nat_ops) {
mutex_unlock(&nf_nat_proto_mutex);
return -ENOMEM;
}
for (i = 0; i < ops_count; i++) {
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv) {
nat_ops[i].priv = priv;
continue;
}
mutex_unlock(&nf_nat_proto_mutex);
while (i)
kfree(nat_ops[--i].priv);
kfree(nat_ops);
return -ENOMEM;
}
ret = nf_register_net_hooks(net, nat_ops, ops_count);
if (ret < 0) {
mutex_unlock(&nf_nat_proto_mutex);
for (i = 0; i < ops_count; i++)
kfree(nat_ops[i].priv);
kfree(nat_ops);
return ret;
}
nat_proto_net->nat_hook_ops = nat_ops;
}
nat_ops = nat_proto_net->nat_hook_ops;
priv = nat_ops[hooknum].priv;
if (WARN_ON_ONCE(!priv)) {
mutex_unlock(&nf_nat_proto_mutex);
return -EOPNOTSUPP;
}
ret = nf_hook_entries_insert_raw(&priv->entries, ops);
if (ret == 0)
nat_proto_net->users++;
mutex_unlock(&nf_nat_proto_mutex);
return ret;
}
void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
unsigned int ops_count)
{
struct nat_net *nat_net = net_generic(net, nat_net_id);
struct nf_nat_hooks_net *nat_proto_net;
struct nf_nat_lookup_hook_priv *priv;
struct nf_hook_ops *nat_ops;
int hooknum = ops->hooknum;
int i;
if (pf >= ARRAY_SIZE(nat_net->nat_proto_net))
return;
nat_proto_net = &nat_net->nat_proto_net[pf];
mutex_lock(&nf_nat_proto_mutex);
if (WARN_ON(nat_proto_net->users == 0))
goto unlock;
nat_proto_net->users--;
nat_ops = nat_proto_net->nat_hook_ops;
for (i = 0; i < ops_count; i++) {
if (nat_ops[i].hooknum == hooknum) {
hooknum = i;
break;
}
}
if (WARN_ON_ONCE(i == ops_count))
goto unlock;
priv = nat_ops[hooknum].priv;
nf_hook_entries_delete_raw(&priv->entries, ops);
if (nat_proto_net->users == 0) {
nf_unregister_net_hooks(net, nat_ops, ops_count);
for (i = 0; i < ops_count; i++) {
priv = nat_ops[i].priv;
kfree_rcu(priv, rcu_head);
}
nat_proto_net->nat_hook_ops = NULL;
kfree(nat_ops);
}
unlock:
mutex_unlock(&nf_nat_proto_mutex);
}
static struct pernet_operations nat_net_ops = {
.id = &nat_net_id,
.size = sizeof(struct nat_net),
};
static const struct nf_nat_hook nat_hook = {
.parse_nat_setup = nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
.decode_session = __nf_nat_decode_session,
#endif
.manip_pkt = nf_nat_manip_pkt,
.remove_nat_bysrc = nf_nat_cleanup_conntrack,
};
static int __init nf_nat_init(void)
{
int ret, i;
/* Leave them the same for the moment. */
nf_nat_htable_size = nf_conntrack_htable_size;
if (nf_nat_htable_size < CONNTRACK_LOCKS)
nf_nat_htable_size = CONNTRACK_LOCKS;
nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
if (!nf_nat_bysource)
return -ENOMEM;
for (i = 0; i < CONNTRACK_LOCKS; i++)
spin_lock_init(&nf_nat_locks[i]);
ret = register_pernet_subsys(&nat_net_ops);
if (ret < 0) {
kvfree(nf_nat_bysource);
return ret;
}
nf_ct_helper_expectfn_register(&follow_master_nat);
WARN_ON(nf_nat_hook != NULL);
RCU_INIT_POINTER(nf_nat_hook, &nat_hook);
ret = register_nf_nat_bpf();
if (ret < 0) {
RCU_INIT_POINTER(nf_nat_hook, NULL);
nf_ct_helper_expectfn_unregister(&follow_master_nat);
synchronize_net();
unregister_pernet_subsys(&nat_net_ops);
kvfree(nf_nat_bysource);
}
return ret;
}
static void __exit nf_nat_cleanup(void)
{
struct nf_nat_proto_clean clean = {};
nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);
nf_ct_helper_expectfn_unregister(&follow_master_nat);
RCU_INIT_POINTER(nf_nat_hook, NULL);
synchronize_net();
kvfree(nf_nat_bysource);
unregister_pernet_subsys(&nat_net_ops);
}
MODULE_LICENSE("GPL");
module_init(nf_nat_init);
module_exit(nf_nat_cleanup);
| linux-master | net/netfilter/nf_nat_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Generic part shared by ipv4 and ipv6 backends.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_fib.h>
#define NFTA_FIB_F_ALL (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR | \
NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF | \
NFTA_FIB_F_PRESENT)
const struct nla_policy nft_fib_policy[NFTA_FIB_MAX + 1] = {
[NFTA_FIB_DREG] = { .type = NLA_U32 },
[NFTA_FIB_RESULT] = { .type = NLA_U32 },
[NFTA_FIB_FLAGS] =
NLA_POLICY_MASK(NLA_BE32, NFTA_FIB_F_ALL),
};
EXPORT_SYMBOL(nft_fib_policy);
int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nft_data **data)
{
const struct nft_fib *priv = nft_expr_priv(expr);
unsigned int hooks;
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
hooks = (1 << NF_INET_PRE_ROUTING);
if (priv->flags & NFTA_FIB_F_IIF) {
hooks |= (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD);
}
break;
case NFT_FIB_RESULT_ADDRTYPE:
if (priv->flags & NFTA_FIB_F_IIF)
hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD);
else if (priv->flags & NFTA_FIB_F_OIF)
hooks = (1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_FORWARD);
else
hooks = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING);
break;
default:
return -EINVAL;
}
return nft_chain_validate_hooks(ctx->chain, hooks);
}
EXPORT_SYMBOL_GPL(nft_fib_validate);
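/* Usage sketch (assumed nft syntax, for illustration): the OIF result
* at prerouting combined with the saddr/iif flags gives strict reverse
* path filtering, e.g.:
*
*   nft add rule inet filter prerouting fib saddr . iif oif missing drop
*
* which drops packets whose source address is not routable back out of
* the interface they arrived on.
*/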
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_fib *priv = nft_expr_priv(expr);
unsigned int len;
int err;
if (!tb[NFTA_FIB_DREG] || !tb[NFTA_FIB_RESULT] || !tb[NFTA_FIB_FLAGS])
return -EINVAL;
priv->flags = ntohl(nla_get_be32(tb[NFTA_FIB_FLAGS]));
if (priv->flags == 0)
return -EINVAL;
if ((priv->flags & (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR)) ==
(NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR))
return -EINVAL;
if ((priv->flags & (NFTA_FIB_F_IIF | NFTA_FIB_F_OIF)) ==
(NFTA_FIB_F_IIF | NFTA_FIB_F_OIF))
return -EINVAL;
if ((priv->flags & (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR)) == 0)
return -EINVAL;
priv->result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT]));
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
if (priv->flags & NFTA_FIB_F_OIF)
return -EINVAL;
len = sizeof(int);
break;
case NFT_FIB_RESULT_OIFNAME:
if (priv->flags & NFTA_FIB_F_OIF)
return -EINVAL;
len = IFNAMSIZ;
break;
case NFT_FIB_RESULT_ADDRTYPE:
len = sizeof(u32);
break;
default:
return -EINVAL;
}
err = nft_parse_register_store(ctx, tb[NFTA_FIB_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, len);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL_GPL(nft_fib_init);
int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset)
{
const struct nft_fib *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_FIB_DREG, priv->dreg))
return -1;
if (nla_put_be32(skb, NFTA_FIB_RESULT, htonl(priv->result)))
return -1;
if (nla_put_be32(skb, NFTA_FIB_FLAGS, htonl(priv->flags)))
return -1;
return 0;
}
EXPORT_SYMBOL_GPL(nft_fib_dump);
void nft_fib_store_result(void *reg, const struct nft_fib *priv,
const struct net_device *dev)
{
u32 *dreg = reg;
int index;
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
index = dev ? dev->ifindex : 0;
*dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index;
break;
case NFT_FIB_RESULT_OIFNAME:
if (priv->flags & NFTA_FIB_F_PRESENT)
*dreg = !!dev;
else
strscpy_pad(reg, dev ? dev->name : "", IFNAMSIZ);
break;
default:
WARN_ON_ONCE(1);
*dreg = 0;
break;
}
}
EXPORT_SYMBOL_GPL(nft_fib_store_result);
bool nft_fib_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_fib *priv = nft_expr_priv(expr);
unsigned int len = NFT_REG32_SIZE;
const struct nft_fib *fib;
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
break;
case NFT_FIB_RESULT_OIFNAME:
if (priv->flags & NFTA_FIB_F_PRESENT)
len = NFT_REG32_SIZE;
else
len = IFNAMSIZ;
break;
case NFT_FIB_RESULT_ADDRTYPE:
break;
default:
WARN_ON_ONCE(1);
break;
}
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, len);
return false;
}
fib = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->result != fib->result ||
priv->flags != fib->flags) {
nft_reg_track_update(track, expr, priv->dreg, len);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return false;
}
EXPORT_SYMBOL_GPL(nft_fib_reduce);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
| linux-master | net/netfilter/nft_fib.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <net/ip.h>
#include <net/tcp.h>
#include <net/netfilter/nf_tables.h>
#include <linux/netfilter/nfnetlink_osf.h>
struct nft_osf {
u8 dreg;
u8 ttl;
u32 flags;
};
static const struct nla_policy nft_osf_policy[NFTA_OSF_MAX + 1] = {
[NFTA_OSF_DREG] = { .type = NLA_U32 },
[NFTA_OSF_TTL] = { .type = NLA_U8 },
[NFTA_OSF_FLAGS] = { .type = NLA_U32 },
};
static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_osf *priv = nft_expr_priv(expr);
u32 *dest = ®s->data[priv->dreg];
struct sk_buff *skb = pkt->skb;
char os_match[NFT_OSF_MAXGENRELEN];
const struct tcphdr *tcp;
struct nf_osf_data data;
struct tcphdr _tcph;
if (pkt->tprot != IPPROTO_TCP) {
regs->verdict.code = NFT_BREAK;
return;
}
tcp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(struct tcphdr), &_tcph);
if (!tcp) {
regs->verdict.code = NFT_BREAK;
return;
}
if (!tcp->syn) {
regs->verdict.code = NFT_BREAK;
return;
}
if (!nf_osf_find(skb, nf_osf_fingers, priv->ttl, &data)) {
strscpy_pad((char *)dest, "unknown", NFT_OSF_MAXGENRELEN);
} else {
if (priv->flags & NFT_OSF_F_VERSION)
snprintf(os_match, NFT_OSF_MAXGENRELEN, "%s:%s",
data.genre, data.version);
else
strscpy(os_match, data.genre, NFT_OSF_MAXGENRELEN);
strscpy_pad((char *)dest, os_match, NFT_OSF_MAXGENRELEN);
}
}
static int nft_osf_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_osf *priv = nft_expr_priv(expr);
u32 flags;
int err;
u8 ttl;
if (!tb[NFTA_OSF_DREG])
return -EINVAL;
if (tb[NFTA_OSF_TTL]) {
ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
if (ttl > 2)
return -EINVAL;
priv->ttl = ttl;
}
if (tb[NFTA_OSF_FLAGS]) {
flags = ntohl(nla_get_be32(tb[NFTA_OSF_FLAGS]));
if (flags != NFT_OSF_F_VERSION)
return -EINVAL;
priv->flags = flags;
}
err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE,
NFT_OSF_MAXGENRELEN);
if (err < 0)
return err;
return 0;
}
static int nft_osf_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_osf *priv = nft_expr_priv(expr);
if (nla_put_u8(skb, NFTA_OSF_TTL, priv->ttl))
goto nla_put_failure;
if (nla_put_u32(skb, NFTA_OSF_FLAGS, ntohl((__force __be32)priv->flags)))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_osf_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
unsigned int hooks;
switch (ctx->family) {
case NFPROTO_IPV4:
case NFPROTO_IPV6:
case NFPROTO_INET:
hooks = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_FORWARD);
break;
default:
return -EOPNOTSUPP;
}
return nft_chain_validate_hooks(ctx->chain, hooks);
}
static bool nft_osf_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
struct nft_osf *priv = nft_expr_priv(expr);
struct nft_osf *osf;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, NFT_OSF_MAXGENRELEN);
return false;
}
osf = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->flags != osf->flags ||
priv->ttl != osf->ttl) {
nft_reg_track_update(track, expr, priv->dreg, NFT_OSF_MAXGENRELEN);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return false;
}
static struct nft_expr_type nft_osf_type;
static const struct nft_expr_ops nft_osf_op = {
.eval = nft_osf_eval,
.size = NFT_EXPR_SIZE(sizeof(struct nft_osf)),
.init = nft_osf_init,
.dump = nft_osf_dump,
.type = &nft_osf_type,
.validate = nft_osf_validate,
.reduce = nft_osf_reduce,
};
static struct nft_expr_type nft_osf_type __read_mostly = {
.ops = &nft_osf_op,
.name = "osf",
.owner = THIS_MODULE,
.policy = nft_osf_policy,
.maxattr = NFTA_OSF_MAX,
};
static int __init nft_osf_module_init(void)
{
return nft_register_expr(&nft_osf_type);
}
static void __exit nft_osf_module_exit(void)
{
return nft_unregister_expr(&nft_osf_type);
}
module_init(nft_osf_module_init);
module_exit(nft_osf_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fernando Fernandez <[email protected]>");
MODULE_ALIAS_NFT_EXPR("osf");
MODULE_DESCRIPTION("nftables passive OS fingerprint support");
| linux-master | net/netfilter/nft_osf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* A module for stripping a specific TCP option from TCP packets.
*
* Copyright (C) 2007 Sven Schnelle <[email protected]>
* Copyright © CC Computer Consultants GmbH, 2007
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_TCPOPTSTRIP.h>
static inline unsigned int optlen(const u_int8_t *opt, unsigned int offset)
{
/* Beware zero-length options: make finite progress */
if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
return 1;
else
return opt[offset+1];
}
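/* TCP option encoding refresher (for illustration): EOL (kind 0) and
* NOP (kind 1) are single bytes without a length octet; every other
* option is kind, length, data - e.g. MSS is kind 2, length 4, plus
* two data bytes. The guard above advances one byte for kind <= NOP or
* a bogus zero length, so a corrupt header cannot stall the caller's
* option walk.
*/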
static unsigned int
tcpoptstrip_mangle_packet(struct sk_buff *skb,
const struct xt_action_param *par,
unsigned int tcphoff)
{
const struct xt_tcpoptstrip_target_info *info = par->targinfo;
struct tcphdr *tcph, _th;
unsigned int optl, i, j;
u_int16_t n, o;
u_int8_t *opt;
int tcp_hdrlen;
/* This is a fragment, no TCP header is available */
if (par->fragoff != 0)
return XT_CONTINUE;
tcph = skb_header_pointer(skb, tcphoff, sizeof(_th), &_th);
if (!tcph)
return NF_DROP;
tcp_hdrlen = tcph->doff * 4;
if (tcp_hdrlen < sizeof(struct tcphdr))
return NF_DROP;
if (skb_ensure_writable(skb, tcphoff + tcp_hdrlen))
return NF_DROP;
/* must reload tcph, might have been moved */
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
opt = (u8 *)tcph;
/*
* Walk through all TCP options - if we find some option to remove,
* set all octets to %TCPOPT_NOP and adjust checksum.
*/
for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
optl = optlen(opt, i);
if (i + optl > tcp_hdrlen)
break;
if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
continue;
for (j = 0; j < optl; ++j) {
o = opt[i+j];
n = TCPOPT_NOP;
if ((i + j) % 2 == 0) {
o <<= 8;
n <<= 8;
}
inet_proto_csum_replace2(&tcph->check, skb, htons(o),
htons(n), false);
}
memset(opt + i, TCPOPT_NOP, optl);
}
return XT_CONTINUE;
}
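/* Checksum note (for illustration): the Internet checksum works on
* 16-bit words, so when the stripped option byte sits at an even
* offset, both old and new values are shifted into the high byte
* (o <<= 8, n <<= 8) before inet_proto_csum_replace2() so the
* substitution is applied to the correct half of its 16-bit word.
*/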
static unsigned int
tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb));
}
#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
static unsigned int
tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
int tcphoff;
u_int8_t nexthdr;
__be16 frag_off;
nexthdr = ipv6h->nexthdr;
tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
if (tcphoff < 0)
return NF_DROP;
return tcpoptstrip_mangle_packet(skb, par, tcphoff);
}
#endif
static struct xt_target tcpoptstrip_tg_reg[] __read_mostly = {
{
.name = "TCPOPTSTRIP",
.family = NFPROTO_IPV4,
.table = "mangle",
.proto = IPPROTO_TCP,
.target = tcpoptstrip_tg4,
.targetsize = sizeof(struct xt_tcpoptstrip_target_info),
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
{
.name = "TCPOPTSTRIP",
.family = NFPROTO_IPV6,
.table = "mangle",
.proto = IPPROTO_TCP,
.target = tcpoptstrip_tg6,
.targetsize = sizeof(struct xt_tcpoptstrip_target_info),
.me = THIS_MODULE,
},
#endif
};
static int __init tcpoptstrip_tg_init(void)
{
return xt_register_targets(tcpoptstrip_tg_reg,
ARRAY_SIZE(tcpoptstrip_tg_reg));
}
static void __exit tcpoptstrip_tg_exit(void)
{
xt_unregister_targets(tcpoptstrip_tg_reg,
ARRAY_SIZE(tcpoptstrip_tg_reg));
}
module_init(tcpoptstrip_tg_init);
module_exit(tcpoptstrip_tg_exit);
MODULE_AUTHOR("Sven Schnelle <[email protected]>, Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: TCP option stripping");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_TCPOPTSTRIP");
MODULE_ALIAS("ip6t_TCPOPTSTRIP");
| linux-master | net/netfilter/xt_TCPOPTSTRIP.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
struct nft_rbtree {
struct rb_root root;
rwlock_t lock;
seqcount_rwlock_t count;
struct delayed_work gc_work;
};
struct nft_rbtree_elem {
struct rb_node node;
struct nft_set_ext ext;
};
static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}
static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
return !nft_rbtree_interval_end(rbe);
}
static int nft_rbtree_cmp(const struct nft_set *set,
const struct nft_rbtree_elem *e1,
const struct nft_rbtree_elem *e2)
{
return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
set->klen);
}
static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
{
return nft_set_elem_expired(&rbe->ext) ||
nft_set_elem_is_dead(&rbe->ext);
}
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext,
unsigned int seq)
{
struct nft_rbtree *priv = nft_set_priv(set);
const struct nft_rbtree_elem *rbe, *interval = NULL;
u8 genmask = nft_genmask_cur(net);
const struct rb_node *parent;
int d;
parent = rcu_dereference_raw(priv->root.rb_node);
while (parent != NULL) {
if (read_seqcount_retry(&priv->count, seq))
return false;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
if (d < 0) {
parent = rcu_dereference_raw(parent->rb_left);
if (interval &&
!nft_rbtree_cmp(set, rbe, interval) &&
nft_rbtree_interval_end(rbe) &&
nft_rbtree_interval_start(interval))
continue;
interval = rbe;
} else if (d > 0)
parent = rcu_dereference_raw(parent->rb_right);
else {
if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = rcu_dereference_raw(parent->rb_left);
continue;
}
if (nft_rbtree_elem_expired(rbe))
return false;
if (nft_rbtree_interval_end(rbe)) {
if (nft_set_is_anonymous(set))
return false;
parent = rcu_dereference_raw(parent->rb_left);
interval = NULL;
continue;
}
*ext = &rbe->ext;
return true;
}
}
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
!nft_rbtree_elem_expired(interval) &&
nft_rbtree_interval_start(interval)) {
*ext = &interval->ext;
return true;
}
return false;
}
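/* Lookups first run locklessly under RCU; if the seqcount indicates a
* concurrent writer, nft_rbtree_lookup() below retries once more while
* holding the read lock, which keeps writers out.
*/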
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
struct nft_rbtree *priv = nft_set_priv(set);
unsigned int seq = read_seqcount_begin(&priv->count);
bool ret;
ret = __nft_rbtree_lookup(net, set, key, ext, seq);
if (ret || !read_seqcount_retry(&priv->count, seq))
return ret;
read_lock_bh(&priv->lock);
seq = read_seqcount_begin(&priv->count);
ret = __nft_rbtree_lookup(net, set, key, ext, seq);
read_unlock_bh(&priv->lock);
return ret;
}
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
const u32 *key, struct nft_rbtree_elem **elem,
unsigned int seq, unsigned int flags, u8 genmask)
{
struct nft_rbtree_elem *rbe, *interval = NULL;
struct nft_rbtree *priv = nft_set_priv(set);
const struct rb_node *parent;
const void *this;
int d;
parent = rcu_dereference_raw(priv->root.rb_node);
while (parent != NULL) {
if (read_seqcount_retry(&priv->count, seq))
return false;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
this = nft_set_ext_key(&rbe->ext);
d = memcmp(this, key, set->klen);
if (d < 0) {
parent = rcu_dereference_raw(parent->rb_left);
if (!(flags & NFT_SET_ELEM_INTERVAL_END))
interval = rbe;
} else if (d > 0) {
parent = rcu_dereference_raw(parent->rb_right);
if (flags & NFT_SET_ELEM_INTERVAL_END)
interval = rbe;
} else {
if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = rcu_dereference_raw(parent->rb_left);
continue;
}
if (nft_set_elem_expired(&rbe->ext))
return false;
if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
(flags & NFT_SET_ELEM_INTERVAL_END)) {
*elem = rbe;
return true;
}
if (nft_rbtree_interval_end(rbe))
interval = NULL;
parent = rcu_dereference_raw(parent->rb_left);
}
}
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
!nft_set_elem_expired(&interval->ext) &&
((!nft_rbtree_interval_end(interval) &&
!(flags & NFT_SET_ELEM_INTERVAL_END)) ||
(nft_rbtree_interval_end(interval) &&
(flags & NFT_SET_ELEM_INTERVAL_END)))) {
*elem = interval;
return true;
}
return false;
}
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
struct nft_rbtree *priv = nft_set_priv(set);
unsigned int seq = read_seqcount_begin(&priv->count);
struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
const u32 *key = (const u32 *)&elem->key.val;
u8 genmask = nft_genmask_cur(net);
bool ret;
ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
if (ret || !read_seqcount_retry(&priv->count, seq))
return rbe;
read_lock_bh(&priv->lock);
seq = read_seqcount_begin(&priv->count);
ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
if (!ret)
rbe = ERR_PTR(-ENOENT);
read_unlock_bh(&priv->lock);
return rbe;
}
static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
struct nft_rbtree *priv,
struct nft_rbtree_elem *rbe)
{
struct nft_set_elem elem = {
.priv = rbe,
};
nft_setelem_data_deactivate(net, set, &elem);
rb_erase(&rbe->node, &priv->root);
}
static int nft_rbtree_gc_elem(const struct nft_set *__set,
struct nft_rbtree *priv,
struct nft_rbtree_elem *rbe,
u8 genmask)
{
struct nft_set *set = (struct nft_set *)__set;
struct rb_node *prev = rb_prev(&rbe->node);
struct net *net = read_pnet(&set->net);
struct nft_rbtree_elem *rbe_prev;
struct nft_trans_gc *gc;
gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
if (!gc)
return -ENOMEM;
/* Search for the end interval coming before this element.
* End intervals don't carry a timeout extension; they
* are coupled with the interval start element.
*/
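/* E.g. if the start element of a range expires, the end element paired
* with it would otherwise linger without a timeout of its own, so both
* are collected together.
*/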
while (prev) {
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
if (nft_rbtree_interval_end(rbe_prev) &&
nft_set_elem_active(&rbe_prev->ext, genmask))
break;
prev = rb_prev(prev);
}
if (prev) {
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
nft_rbtree_gc_remove(net, set, priv, rbe_prev);
/* There is always room in this trans gc for this element,
* memory allocation never actually happens, hence the warning
* splat in such a case. No need to set NFT_SET_ELEM_DEAD_BIT,
* this is synchronous gc which never fails.
*/
gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
if (WARN_ON_ONCE(!gc))
return -ENOMEM;
nft_trans_gc_elem_add(gc, rbe_prev);
}
nft_rbtree_gc_remove(net, set, priv, rbe);
gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
if (WARN_ON_ONCE(!gc))
return -ENOMEM;
nft_trans_gc_elem_add(gc, rbe);
nft_trans_gc_queue_sync_done(gc);
return 0;
}
static bool nft_rbtree_update_first(const struct nft_set *set,
struct nft_rbtree_elem *rbe,
struct rb_node *first)
{
struct nft_rbtree_elem *first_elem;
first_elem = rb_entry(first, struct nft_rbtree_elem, node);
/* this element is closer to where the new element is to be inserted:
* make it the new starting point for the ordered walk.
*/
if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
return true;
return false;
}
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_set_ext **ext)
{
struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
struct rb_node *node, *next, *parent, **p, *first = NULL;
struct nft_rbtree *priv = nft_set_priv(set);
u8 cur_genmask = nft_genmask_cur(net);
u8 genmask = nft_genmask_next(net);
int d, err;
/* Descend the tree to search for an existing element whose key is
* greater than that of the new element. This is the first node from
* which the ordered walk below starts looking for possible overlaps.
*/
parent = NULL;
p = &priv->root.rb_node;
while (*p != NULL) {
parent = *p;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
d = nft_rbtree_cmp(set, rbe, new);
if (d < 0) {
p = &parent->rb_left;
} else if (d > 0) {
if (!first ||
nft_rbtree_update_first(set, rbe, first))
first = &rbe->node;
p = &parent->rb_right;
} else {
if (nft_rbtree_interval_end(rbe))
p = &parent->rb_left;
else
p = &parent->rb_right;
}
}
if (!first)
first = rb_first(&priv->root);
/* Detect overlap by going through the list of valid tree nodes.
* Values are stored in the tree in reverse order, from highest
* to lowest.
*/
for (node = first; node != NULL; node = next) {
next = rb_next(node);
rbe = rb_entry(node, struct nft_rbtree_elem, node);
if (!nft_set_elem_active(&rbe->ext, genmask))
continue;
/* perform garbage collection to avoid bogus overlap reports
* but skip new elements in this transaction.
*/
if (nft_set_elem_expired(&rbe->ext) &&
nft_set_elem_active(&rbe->ext, cur_genmask)) {
err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
if (err < 0)
return err;
continue;
}
d = nft_rbtree_cmp(set, rbe, new);
if (d == 0) {
/* Matching end element: no need to look for an
* overlapping greater or equal element.
*/
if (nft_rbtree_interval_end(rbe)) {
rbe_le = rbe;
break;
}
/* first element that is greater than or equal to the key value. */
if (!rbe_ge) {
rbe_ge = rbe;
continue;
}
/* this is a closer greater-or-equal element, update it. */
if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
rbe_ge = rbe;
continue;
}
/* element is equal to the key value; make sure the flags
* match, since an existing greater-or-equal start element
* must not be replaced by a greater-or-equal end element.
*/
if ((nft_rbtree_interval_start(new) &&
nft_rbtree_interval_start(rbe_ge)) ||
(nft_rbtree_interval_end(new) &&
nft_rbtree_interval_end(rbe_ge))) {
rbe_ge = rbe;
continue;
}
} else if (d > 0) {
/* annotate element greater than the new element. */
rbe_ge = rbe;
continue;
} else if (d < 0) {
/* annotate element less than the new element. */
rbe_le = rbe;
break;
}
}
/* - new start element matching existing start element: full overlap
* reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
*/
if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
*ext = &rbe_ge->ext;
return -EEXIST;
}
/* - new end element matching existing end element: full overlap
* reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
*/
if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
*ext = &rbe_le->ext;
return -EEXIST;
}
/* - new start element with existing closest, less or equal key value
* being a start element: partial overlap, reported as -ENOTEMPTY.
* Anonymous sets allow two consecutive start elements since they
* are constant; skip them to avoid bogus overlap reports.
*/
if (!nft_set_is_anonymous(set) && rbe_le &&
nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
return -ENOTEMPTY;
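/* E.g. with an existing range encoded as start element 10 and end
* element 21, inserting a new start element with key 15 finds start 10
* as the closest less-or-equal node and is rejected here.
*/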
/* - new end element with existing closest, less or equal key value
* being an end element: partial overlap, reported as -ENOTEMPTY.
*/
if (rbe_le &&
nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
return -ENOTEMPTY;
/* - new end element with existing closest, greater or equal key value
* being an end element: partial overlap, reported as -ENOTEMPTY
*/
if (rbe_ge &&
nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
return -ENOTEMPTY;
/* Accepted element: pick insertion point depending on key value */
parent = NULL;
p = &priv->root.rb_node;
while (*p != NULL) {
parent = *p;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
d = nft_rbtree_cmp(set, rbe, new);
if (d < 0)
p = &parent->rb_left;
else if (d > 0)
p = &parent->rb_right;
else if (nft_rbtree_interval_end(rbe))
p = &parent->rb_left;
else
p = &parent->rb_right;
}
rb_link_node_rcu(&new->node, parent, p);
rb_insert_color(&new->node, &priv->root);
return 0;
}
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **ext)
{
struct nft_rbtree *priv = nft_set_priv(set);
struct nft_rbtree_elem *rbe = elem->priv;
int err;
write_lock_bh(&priv->lock);
write_seqcount_begin(&priv->count);
err = __nft_rbtree_insert(net, set, rbe, ext);
write_seqcount_end(&priv->count);
write_unlock_bh(&priv->lock);
return err;
}
static void nft_rbtree_remove(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_rbtree *priv = nft_set_priv(set);
struct nft_rbtree_elem *rbe = elem->priv;
write_lock_bh(&priv->lock);
write_seqcount_begin(&priv->count);
rb_erase(&rbe->node, &priv->root);
write_seqcount_end(&priv->count);
write_unlock_bh(&priv->lock);
}
static void nft_rbtree_activate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_rbtree_elem *rbe = elem->priv;
nft_set_elem_change_active(net, set, &rbe->ext);
}
static bool nft_rbtree_flush(const struct net *net,
const struct nft_set *set, void *priv)
{
struct nft_rbtree_elem *rbe = priv;
nft_set_elem_change_active(net, set, &rbe->ext);
return true;
}
static void *nft_rbtree_deactivate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
const struct nft_rbtree *priv = nft_set_priv(set);
const struct rb_node *parent = priv->root.rb_node;
struct nft_rbtree_elem *rbe, *this = elem->priv;
u8 genmask = nft_genmask_next(net);
int d;
while (parent != NULL) {
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
set->klen);
if (d < 0)
parent = parent->rb_left;
else if (d > 0)
parent = parent->rb_right;
else {
if (nft_rbtree_interval_end(rbe) &&
nft_rbtree_interval_start(this)) {
parent = parent->rb_left;
continue;
} else if (nft_rbtree_interval_start(rbe) &&
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = parent->rb_left;
continue;
}
nft_rbtree_flush(net, set, rbe);
return rbe;
}
}
return NULL;
}
static void nft_rbtree_walk(const struct nft_ctx *ctx,
struct nft_set *set,
struct nft_set_iter *iter)
{
struct nft_rbtree *priv = nft_set_priv(set);
struct nft_rbtree_elem *rbe;
struct nft_set_elem elem;
struct rb_node *node;
read_lock_bh(&priv->lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
rbe = rb_entry(node, struct nft_rbtree_elem, node);
if (iter->count < iter->skip)
goto cont;
if (!nft_set_elem_active(&rbe->ext, iter->genmask))
goto cont;
elem.priv = rbe;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0) {
read_unlock_bh(&priv->lock);
return;
}
cont:
iter->count++;
}
read_unlock_bh(&priv->lock);
}
static void nft_rbtree_gc(struct work_struct *work)
{
struct nft_rbtree_elem *rbe, *rbe_end = NULL;
struct nftables_pernet *nft_net;
struct nft_rbtree *priv;
struct nft_trans_gc *gc;
struct rb_node *node;
struct nft_set *set;
unsigned int gc_seq;
struct net *net;
priv = container_of(work, struct nft_rbtree, gc_work.work);
set = nft_set_container_of(priv);
net = read_pnet(&set->net);
nft_net = nft_pernet(net);
gc_seq = READ_ONCE(nft_net->gc_seq);
if (nft_set_gc_is_pending(set))
goto done;
gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
if (!gc)
goto done;
read_lock_bh(&priv->lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
/* Ruleset has been updated, try later. */
if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
nft_trans_gc_destroy(gc);
gc = NULL;
goto try_later;
}
rbe = rb_entry(node, struct nft_rbtree_elem, node);
if (nft_set_elem_is_dead(&rbe->ext))
goto dead_elem;
/* elements are reversed in the rbtree for historical reasons,
* from highest to lowest value, which is why the end element is
* always visited before its start element.
*/
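/* An expired start element therefore takes the most recently seen
* end element (rbe_end) down with it.
*/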
if (nft_rbtree_interval_end(rbe)) {
rbe_end = rbe;
continue;
}
if (!nft_set_elem_expired(&rbe->ext))
continue;
nft_set_elem_dead(&rbe->ext);
if (!rbe_end)
continue;
nft_set_elem_dead(&rbe_end->ext);
gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
if (!gc)
goto try_later;
nft_trans_gc_elem_add(gc, rbe_end);
rbe_end = NULL;
dead_elem:
gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
if (!gc)
goto try_later;
nft_trans_gc_elem_add(gc, rbe);
}
gc = nft_trans_gc_catchall_async(gc, gc_seq);
try_later:
read_unlock_bh(&priv->lock);
if (gc)
nft_trans_gc_queue_async_done(gc);
done:
queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
nft_set_gc_interval(set));
}
static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
const struct nft_set_desc *desc)
{
return sizeof(struct nft_rbtree);
}
static int nft_rbtree_init(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const nla[])
{
struct nft_rbtree *priv = nft_set_priv(set);
rwlock_init(&priv->lock);
seqcount_rwlock_init(&priv->count, &priv->lock);
priv->root = RB_ROOT;
INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
if (set->flags & NFT_SET_TIMEOUT)
queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
nft_set_gc_interval(set));
return 0;
}
static void nft_rbtree_destroy(const struct nft_ctx *ctx,
const struct nft_set *set)
{
struct nft_rbtree *priv = nft_set_priv(set);
struct nft_rbtree_elem *rbe;
struct rb_node *node;
cancel_delayed_work_sync(&priv->gc_work);
rcu_barrier();
while ((node = priv->root.rb_node) != NULL) {
rb_erase(node, &priv->root);
rbe = rb_entry(node, struct nft_rbtree_elem, node);
nf_tables_set_elem_destroy(ctx, set, rbe);
}
}
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
if (desc->field_count > 1)
return false;
if (desc->size)
est->size = sizeof(struct nft_rbtree) +
desc->size * sizeof(struct nft_rbtree_elem);
else
est->size = ~0;
est->lookup = NFT_SET_CLASS_O_LOG_N;
est->space = NFT_SET_CLASS_O_N;
return true;
}
const struct nft_set_type nft_set_rbtree_type = {
.features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
.ops = {
.privsize = nft_rbtree_privsize,
.elemsize = offsetof(struct nft_rbtree_elem, ext),
.estimate = nft_rbtree_estimate,
.init = nft_rbtree_init,
.destroy = nft_rbtree_destroy,
.insert = nft_rbtree_insert,
.remove = nft_rbtree_remove,
.deactivate = nft_rbtree_deactivate,
.flush = nft_rbtree_flush,
.activate = nft_rbtree_activate,
.lookup = nft_rbtree_lookup,
.walk = nft_rbtree_walk,
.get = nft_rbtree_get,
},
};
| linux-master | net/netfilter/nft_set_rbtree.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel module to match various things tied to sockets associated with
* locally generated outgoing packets.
*
* (C) 2000 Marc Boucher <[email protected]>
*
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/cred.h>
#include <net/sock.h>
#include <net/inet_sock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_owner.h>
static int owner_check(const struct xt_mtchk_param *par)
{
struct xt_owner_match_info *info = par->matchinfo;
struct net *net = par->net;
if (info->match & ~XT_OWNER_MASK)
return -EINVAL;
/* Only allow the common case where the userns of the writer
* matches the userns of the network namespace.
*/
if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) &&
(current_user_ns() != net->user_ns))
return -EINVAL;
/* Ensure the uids are valid */
if (info->match & XT_OWNER_UID) {
kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
if (!uid_valid(uid_min) || !uid_valid(uid_max) ||
(info->uid_max < info->uid_min) ||
uid_lt(uid_max, uid_min)) {
return -EINVAL;
}
}
/* Ensure the gids are valid */
if (info->match & XT_OWNER_GID) {
kgid_t gid_min = make_kgid(net->user_ns, info->gid_min);
kgid_t gid_max = make_kgid(net->user_ns, info->gid_max);
if (!gid_valid(gid_min) || !gid_valid(gid_max) ||
(info->gid_max < info->gid_min) ||
gid_lt(gid_max, gid_min)) {
return -EINVAL;
}
}
return 0;
}
static bool
owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_owner_match_info *info = par->matchinfo;
const struct file *filp;
struct sock *sk = skb_to_full_sk(skb);
struct net *net = xt_net(par);
if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk)))
return (info->match ^ info->invert) == 0;
else if (info->match & info->invert & XT_OWNER_SOCKET)
/*
* Socket exists but user wanted ! --socket-exists.
* (Single ampersands intended.)
*/
return false;
filp = sk->sk_socket->file;
if (filp == NULL)
return ((info->match ^ info->invert) &
(XT_OWNER_UID | XT_OWNER_GID)) == 0;
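/* Each check below computes "id within [min, max]" and XORs it with the
* invert bit: the match fails when the id is outside a non-inverted
* range or inside an inverted one.
*/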
if (info->match & XT_OWNER_UID) {
kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
uid_lte(filp->f_cred->fsuid, uid_max)) ^
!(info->invert & XT_OWNER_UID))
return false;
}
if (info->match & XT_OWNER_GID) {
unsigned int i, match = false;
kgid_t gid_min = make_kgid(net->user_ns, info->gid_min);
kgid_t gid_max = make_kgid(net->user_ns, info->gid_max);
struct group_info *gi = filp->f_cred->group_info;
if (gid_gte(filp->f_cred->fsgid, gid_min) &&
gid_lte(filp->f_cred->fsgid, gid_max))
match = true;
if (!match && (info->match & XT_OWNER_SUPPL_GROUPS) && gi) {
for (i = 0; i < gi->ngroups; ++i) {
kgid_t group = gi->gid[i];
if (gid_gte(group, gid_min) &&
gid_lte(group, gid_max)) {
match = true;
break;
}
}
}
if (match ^ !(info->invert & XT_OWNER_GID))
return false;
}
return true;
}
static struct xt_match owner_mt_reg __read_mostly = {
.name = "owner",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = owner_check,
.match = owner_mt,
.matchsize = sizeof(struct xt_owner_match_info),
.hooks = (1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING),
.me = THIS_MODULE,
};
static int __init owner_mt_init(void)
{
return xt_register_match(&owner_mt_reg);
}
static void __exit owner_mt_exit(void)
{
xt_unregister_match(&owner_mt_reg);
}
module_init(owner_mt_init);
module_exit(owner_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: socket owner matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_owner");
MODULE_ALIAS("ip6t_owner");
| linux-master | net/netfilter/xt_owner.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/dccp.h>
#include <linux/sctp.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/tcp.h>
struct nft_exthdr {
u8 type;
u8 offset;
u8 len;
u8 op;
u8 dreg;
u8 sreg;
u8 flags;
};
static unsigned int optlen(const u8 *opt, unsigned int offset)
{
/* Beware zero-length options: make finite progress */
if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
return 1;
else
return opt[offset + 1];
}
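/* TCP options other than EOL (0) and NOP (1) are TLV encoded, e.g. MSS is
* kind 2, length 4, followed by a 16-bit value; for those optlen() returns
* the length byte, for the single-byte kinds it returns 1.
*/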
static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len)
{
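/* Registers are 32 bits wide: when len is not a multiple of
* NFT_REG32_SIZE, clear the last register word up front so the partial
* copy below doesn't leave stale bytes behind.
*/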
if (len % NFT_REG32_SIZE)
dest[len / NFT_REG32_SIZE] = 0;
return skb_copy_bits(skb, offset, dest, len);
}
static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_exthdr *priv = nft_expr_priv(expr);
u32 *dest = ®s->data[priv->dreg];
unsigned int offset = 0;
int err;
if (pkt->skb->protocol != htons(ETH_P_IPV6))
goto err;
err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
nft_reg_store8(dest, err >= 0);
return;
} else if (err < 0) {
goto err;
}
offset += priv->offset;
if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
regs->verdict.code = NFT_BREAK;
}
/* find the offset to the specified option.
*
* If the target header is found, its offset is set in *offset and the
* option number is returned. Otherwise, a negative error is returned.
*
* If the first fragment doesn't contain the End of Options, it is
* considered invalid.
*/
static int ipv4_find_option(struct net *net, struct sk_buff *skb,
unsigned int *offset, int target)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;
struct iphdr *iph, _iph;
unsigned int start;
bool found = false;
__be32 info;
int optlen;
iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (!iph)
return -EBADMSG;
start = sizeof(struct iphdr);
optlen = iph->ihl * 4 - (int)sizeof(struct iphdr);
if (optlen <= 0)
return -ENOENT;
memset(opt, 0, sizeof(struct ip_options));
/* Copy the options since __ip_options_compile() modifies
* the options.
*/
if (skb_copy_bits(skb, start, opt->__data, optlen))
return -EBADMSG;
opt->optlen = optlen;
if (__ip_options_compile(net, opt, NULL, &info))
return -EBADMSG;
switch (target) {
case IPOPT_SSRR:
case IPOPT_LSRR:
if (!opt->srr)
break;
found = target == IPOPT_SSRR ? opt->is_strictroute :
!opt->is_strictroute;
if (found)
*offset = opt->srr + start;
break;
case IPOPT_RR:
if (!opt->rr)
break;
*offset = opt->rr + start;
found = true;
break;
case IPOPT_RA:
if (!opt->router_alert)
break;
*offset = opt->router_alert + start;
found = true;
break;
default:
return -EOPNOTSUPP;
}
return found ? target : -ENOENT;
}
static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_exthdr *priv = nft_expr_priv(expr);
u32 *dest = ®s->data[priv->dreg];
struct sk_buff *skb = pkt->skb;
unsigned int offset;
int err;
if (skb->protocol != htons(ETH_P_IP))
goto err;
err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
nft_reg_store8(dest, err >= 0);
return;
} else if (err < 0) {
goto err;
}
offset += priv->offset;
if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
regs->verdict.code = NFT_BREAK;
}
static void *
nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
unsigned int len, void *buffer, unsigned int *tcphdr_len)
{
struct tcphdr *tcph;
if (pkt->tprot != IPPROTO_TCP || pkt->fragoff)
return NULL;
tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
if (!tcph)
return NULL;
*tcphdr_len = __tcp_hdrlen(tcph);
if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len)
return NULL;
return skb_header_pointer(pkt->skb, nft_thoff(pkt), *tcphdr_len, buffer);
}
static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
struct nft_exthdr *priv = nft_expr_priv(expr);
unsigned int i, optl, tcphdr_len, offset;
u32 *dest = ®s->data[priv->dreg];
struct tcphdr *tcph;
u8 *opt;
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
if (!tcph)
goto err;
opt = (u8 *)tcph;
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
optl = optlen(opt, i);
if (priv->type != opt[i])
continue;
if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
goto err;
offset = i + priv->offset;
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
*dest = 1;
} else {
if (priv->len % NFT_REG32_SIZE)
dest[priv->len / NFT_REG32_SIZE] = 0;
memcpy(dest, opt + offset, priv->len);
}
return;
}
err:
if (priv->flags & NFT_EXTHDR_F_PRESENT)
*dest = 0;
else
regs->verdict.code = NFT_BREAK;
}
static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
struct nft_exthdr *priv = nft_expr_priv(expr);
unsigned int i, optl, tcphdr_len, offset;
struct tcphdr *tcph;
u8 *opt;
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
if (!tcph)
goto err;
if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
goto err;
tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
opt = (u8 *)tcph;
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
union {
__be16 v16;
__be32 v32;
} old, new;
optl = optlen(opt, i);
if (priv->type != opt[i])
continue;
if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
goto err;
offset = i + priv->offset;
switch (priv->len) {
case 2:
old.v16 = (__force __be16)get_unaligned((u16 *)(opt + offset));
new.v16 = (__force __be16)nft_reg_load16(
®s->data[priv->sreg]);
switch (priv->type) {
case TCPOPT_MSS:
/* increase can cause connection to stall */
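/* e.g. classic MSS clamping on a tunnel only ever lowers the
* value; advertising a larger MSS than originally offered can
* produce segments that exceed the path MTU and never arrive.
*/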
if (ntohs(old.v16) <= ntohs(new.v16))
return;
break;
}
if (old.v16 == new.v16)
return;
put_unaligned(new.v16, (__be16*)(opt + offset));
inet_proto_csum_replace2(&tcph->check, pkt->skb,
old.v16, new.v16, false);
break;
case 4:
new.v32 = nft_reg_load_be32(®s->data[priv->sreg]);
old.v32 = (__force __be32)get_unaligned((u32 *)(opt + offset));
if (old.v32 == new.v32)
return;
put_unaligned(new.v32, (__be32*)(opt + offset));
inet_proto_csum_replace4(&tcph->check, pkt->skb,
old.v32, new.v32, false);
break;
default:
WARN_ON_ONCE(1);
break;
}
return;
}
return;
err:
regs->verdict.code = NFT_BREAK;
}
static void nft_exthdr_tcp_strip_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
struct nft_exthdr *priv = nft_expr_priv(expr);
unsigned int i, tcphdr_len, optl;
struct tcphdr *tcph;
u8 *opt;
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
if (!tcph)
goto err;
if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
goto drop;
tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
opt = (u8 *)tcph;
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
unsigned int j;
optl = optlen(opt, i);
if (priv->type != opt[i])
continue;
if (i + optl > tcphdr_len)
goto drop;
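/* Rewrite the option to NOPs one byte at a time; the checksum helper
* works on 16-bit words, so a byte at an even offset sits in the
* high-order half of its word, hence the shift by 8 below.
*/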
for (j = 0; j < optl; ++j) {
u16 n = TCPOPT_NOP;
u16 o = opt[i+j];
if ((i + j) % 2 == 0) {
o <<= 8;
n <<= 8;
}
inet_proto_csum_replace2(&tcph->check, pkt->skb, htons(o),
htons(n), false);
}
memset(opt + i, TCPOPT_NOP, optl);
return;
}
/* option not found, continue. This allows multiple
* option removals per rule.
*/
return;
err:
regs->verdict.code = NFT_BREAK;
return;
drop:
/* can't remove, no choice but to drop */
regs->verdict.code = NF_DROP;
}
static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
unsigned int offset = nft_thoff(pkt) + sizeof(struct sctphdr);
struct nft_exthdr *priv = nft_expr_priv(expr);
u32 *dest = ®s->data[priv->dreg];
const struct sctp_chunkhdr *sch;
struct sctp_chunkhdr _sch;
if (pkt->tprot != IPPROTO_SCTP)
goto err;
do {
sch = skb_header_pointer(pkt->skb, offset, sizeof(_sch), &_sch);
if (!sch || !sch->length)
break;
if (sch->type == priv->type) {
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
nft_reg_store8(dest, true);
return;
}
if (priv->offset + priv->len > ntohs(sch->length) ||
offset + ntohs(sch->length) > pkt->skb->len)
break;
if (nft_skb_copy_to_reg(pkt->skb, offset + priv->offset,
dest, priv->len) < 0)
break;
return;
}
offset += SCTP_PAD4(ntohs(sch->length));
} while (offset < pkt->skb->len);
err:
if (priv->flags & NFT_EXTHDR_F_PRESENT)
nft_reg_store8(dest, false);
else
regs->verdict.code = NFT_BREAK;
}
static void nft_exthdr_dccp_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_exthdr *priv = nft_expr_priv(expr);
unsigned int thoff, dataoff, optoff, optlen, i;
u32 *dest = ®s->data[priv->dreg];
const struct dccp_hdr *dh;
struct dccp_hdr _dh;
if (pkt->tprot != IPPROTO_DCCP || pkt->fragoff)
goto err;
thoff = nft_thoff(pkt);
dh = skb_header_pointer(pkt->skb, thoff, sizeof(_dh), &_dh);
if (!dh)
goto err;
dataoff = dh->dccph_doff * sizeof(u32);
optoff = __dccp_hdr_len(dh);
if (dataoff <= optoff)
goto err;
optlen = dataoff - optoff;
for (i = 0; i < optlen; ) {
/* Options 0 (DCCPO_PADDING) - 31 (DCCPO_MAX_RESERVED) are 1B long;
* the remaining options are at least 2B long. In
* all cases, the first byte contains the option type. In
* multi-byte options, the second byte contains the option
* length, which must be at least two: 1 for the type plus 1 for
* the length plus 0-253 for any following option data. We
* aren't interested in the option data, only the type and the
* length, so we don't need to read more than two bytes at a
* time.
*/
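/* E.g. a Padding option is the single byte 0, while a multi-byte
* option such as NDP Count with two data bytes would be encoded as
* { 37, 4, data, data }.
*/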
unsigned int buflen = optlen - i;
u8 buf[2], *bufp;
u8 type, len;
if (buflen > sizeof(buf))
buflen = sizeof(buf);
bufp = skb_header_pointer(pkt->skb, thoff + optoff + i, buflen,
&buf);
if (!bufp)
goto err;
type = bufp[0];
if (type == priv->type) {
*dest = 1;
return;
}
if (type <= DCCPO_MAX_RESERVED) {
i++;
continue;
}
if (buflen < 2)
goto err;
len = bufp[1];
if (len < 2)
goto err;
i += len;
}
err:
*dest = 0;
}
static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
[NFTA_EXTHDR_DREG] = { .type = NLA_U32 },
[NFTA_EXTHDR_TYPE] = { .type = NLA_U8 },
[NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 },
[NFTA_EXTHDR_LEN] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 },
[NFTA_EXTHDR_OP] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_EXTHDR_SREG] = { .type = NLA_U32 },
};
static int nft_exthdr_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
int err;
if (!tb[NFTA_EXTHDR_DREG] ||
!tb[NFTA_EXTHDR_TYPE] ||
!tb[NFTA_EXTHDR_OFFSET] ||
!tb[NFTA_EXTHDR_LEN])
return -EINVAL;
err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
if (err < 0)
return err;
err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
if (err < 0)
return err;
if (tb[NFTA_EXTHDR_FLAGS]) {
err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags);
if (err < 0)
return err;
if (flags & ~NFT_EXTHDR_F_PRESENT)
return -EINVAL;
}
if (tb[NFTA_EXTHDR_OP]) {
err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
if (err < 0)
return err;
}
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
priv->offset = offset;
priv->len = len;
priv->flags = flags;
priv->op = op;
return nft_parse_register_store(ctx, tb[NFTA_EXTHDR_DREG],
&priv->dreg, NULL, NFT_DATA_VALUE,
priv->len);
}
static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
int err;
if (!tb[NFTA_EXTHDR_SREG] ||
!tb[NFTA_EXTHDR_TYPE] ||
!tb[NFTA_EXTHDR_OFFSET] ||
!tb[NFTA_EXTHDR_LEN])
return -EINVAL;
if (tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS])
return -EINVAL;
err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
if (err < 0)
return err;
err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
if (err < 0)
return err;
if (offset < 2)
return -EOPNOTSUPP;
switch (len) {
case 2: break;
case 4: break;
default:
return -EOPNOTSUPP;
}
err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
if (err < 0)
return err;
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
priv->offset = offset;
priv->len = len;
priv->flags = flags;
priv->op = op;
return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg,
priv->len);
}
static int nft_exthdr_tcp_strip_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
if (tb[NFTA_EXTHDR_SREG] ||
tb[NFTA_EXTHDR_DREG] ||
tb[NFTA_EXTHDR_FLAGS] ||
tb[NFTA_EXTHDR_OFFSET] ||
tb[NFTA_EXTHDR_LEN])
return -EINVAL;
if (!tb[NFTA_EXTHDR_TYPE])
return -EINVAL;
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
priv->op = NFT_EXTHDR_OP_TCPOPT;
return 0;
}
static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
int err = nft_exthdr_init(ctx, expr, tb);
if (err < 0)
return err;
switch (priv->type) {
case IPOPT_SSRR:
case IPOPT_LSRR:
case IPOPT_RR:
case IPOPT_RA:
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int nft_exthdr_dccp_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
int err = nft_exthdr_init(ctx, expr, tb);
if (err < 0)
return err;
if (!(priv->flags & NFT_EXTHDR_F_PRESENT))
return -EOPNOTSUPP;
return 0;
}
static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
{
if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static int nft_exthdr_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_exthdr *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg))
return -1;
return nft_exthdr_dump_common(skb, priv);
}
static int nft_exthdr_dump_set(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_exthdr *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_EXTHDR_SREG, priv->sreg))
return -1;
return nft_exthdr_dump_common(skb, priv);
}
static int nft_exthdr_dump_strip(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_exthdr *priv = nft_expr_priv(expr);
return nft_exthdr_dump_common(skb, priv);
}
static bool nft_exthdr_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_exthdr *priv = nft_expr_priv(expr);
const struct nft_exthdr *exthdr;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
exthdr = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->type != exthdr->type ||
priv->op != exthdr->op ||
priv->flags != exthdr->flags ||
priv->offset != exthdr->offset ||
priv->len != exthdr->len) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return nft_expr_reduce_bitwise(track, expr);
}
static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
.eval = nft_exthdr_ipv6_eval,
.init = nft_exthdr_init,
.dump = nft_exthdr_dump,
.reduce = nft_exthdr_reduce,
};
static const struct nft_expr_ops nft_exthdr_ipv4_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
.eval = nft_exthdr_ipv4_eval,
.init = nft_exthdr_ipv4_init,
.dump = nft_exthdr_dump,
.reduce = nft_exthdr_reduce,
};
static const struct nft_expr_ops nft_exthdr_tcp_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
.eval = nft_exthdr_tcp_eval,
.init = nft_exthdr_init,
.dump = nft_exthdr_dump,
.reduce = nft_exthdr_reduce,
};
static const struct nft_expr_ops nft_exthdr_tcp_set_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
.eval = nft_exthdr_tcp_set_eval,
.init = nft_exthdr_tcp_set_init,
.dump = nft_exthdr_dump_set,
.reduce = NFT_REDUCE_READONLY,
};
static const struct nft_expr_ops nft_exthdr_tcp_strip_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
.eval = nft_exthdr_tcp_strip_eval,
.init = nft_exthdr_tcp_strip_init,
.dump = nft_exthdr_dump_strip,
.reduce = NFT_REDUCE_READONLY,
};
static const struct nft_expr_ops nft_exthdr_sctp_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
.eval = nft_exthdr_sctp_eval,
.init = nft_exthdr_init,
.dump = nft_exthdr_dump,
.reduce = nft_exthdr_reduce,
};
static const struct nft_expr_ops nft_exthdr_dccp_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
.eval = nft_exthdr_dccp_eval,
.init = nft_exthdr_dccp_init,
.dump = nft_exthdr_dump,
.reduce = nft_exthdr_reduce,
};
static const struct nft_expr_ops *
nft_exthdr_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
u32 op;
if (!tb[NFTA_EXTHDR_OP])
return &nft_exthdr_ipv6_ops;
if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG])
return ERR_PTR(-EOPNOTSUPP);
op = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OP]));
switch (op) {
case NFT_EXTHDR_OP_TCPOPT:
if (tb[NFTA_EXTHDR_SREG])
return &nft_exthdr_tcp_set_ops;
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_tcp_ops;
return &nft_exthdr_tcp_strip_ops;
case NFT_EXTHDR_OP_IPV6:
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_ipv6_ops;
break;
case NFT_EXTHDR_OP_IPV4:
if (ctx->family != NFPROTO_IPV6) {
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_ipv4_ops;
}
break;
case NFT_EXTHDR_OP_SCTP:
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_sctp_ops;
break;
case NFT_EXTHDR_OP_DCCP:
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_dccp_ops;
break;
}
return ERR_PTR(-EOPNOTSUPP);
}
struct nft_expr_type nft_exthdr_type __read_mostly = {
.name = "exthdr",
.select_ops = nft_exthdr_select_ops,
.policy = nft_exthdr_policy,
.maxattr = NFTA_EXTHDR_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_exthdr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>
struct nft_cmp_expr {
struct nft_data data;
u8 sreg;
u8 len;
enum nft_cmp_ops op:8;
};
void nft_cmp_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_cmp_expr *priv = nft_expr_priv(expr);
int d;
d = memcmp(®s->data[priv->sreg], &priv->data, priv->len);
switch (priv->op) {
case NFT_CMP_EQ:
if (d != 0)
goto mismatch;
break;
case NFT_CMP_NEQ:
if (d == 0)
goto mismatch;
break;
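/* LT and GT reuse the LTE/GTE comparisons below: equality is rejected
* first, then control falls through.
*/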
case NFT_CMP_LT:
if (d == 0)
goto mismatch;
fallthrough;
case NFT_CMP_LTE:
if (d > 0)
goto mismatch;
break;
case NFT_CMP_GT:
if (d == 0)
goto mismatch;
fallthrough;
case NFT_CMP_GTE:
if (d < 0)
goto mismatch;
break;
}
return;
mismatch:
regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
[NFTA_CMP_SREG] = { .type = NLA_U32 },
[NFTA_CMP_OP] = { .type = NLA_U32 },
[NFTA_CMP_DATA] = { .type = NLA_NESTED },
};
static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_cmp_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc = {
.type = NFT_DATA_VALUE,
.size = sizeof(priv->data),
};
int err;
err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
if (err < 0)
return err;
err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
priv->len = desc.len;
return 0;
}
static int nft_cmp_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_cmp_expr *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
goto nla_put_failure;
if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
NFT_DATA_VALUE, priv->len) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
union nft_cmp_offload_data {
u16 val16;
u32 val32;
u64 val64;
};
static void nft_payload_n2h(union nft_cmp_offload_data *data,
const u8 *val, u32 len)
{
switch (len) {
case 2:
data->val16 = ntohs(*((__be16 *)val));
break;
case 4:
data->val32 = ntohl(*((__be32 *)val));
break;
case 8:
data->val64 = be64_to_cpu(*((__be64 *)val));
break;
default:
WARN_ON_ONCE(1);
break;
}
}
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_cmp_expr *priv)
{
struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
union nft_cmp_offload_data _data, _datamask;
u8 *mask = (u8 *)&flow->match.mask;
u8 *key = (u8 *)&flow->match.key;
u8 *data, *datamask;
if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
return -EOPNOTSUPP;
if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
nft_payload_n2h(&_datamask, (u8 *)®->mask, reg->len);
data = (u8 *)&_data;
datamask = (u8 *)&_datamask;
} else {
data = (u8 *)&priv->data;
datamask = (u8 *)®->mask;
}
memcpy(key + reg->offset, data, reg->len);
memcpy(mask + reg->offset, datamask, reg->len);
flow->match.dissector.used_keys |= BIT_ULL(reg->key);
flow->match.dissector.offset[reg->key] = reg->base_offset;
if (reg->key == FLOW_DISSECTOR_KEY_META &&
reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
return -EOPNOTSUPP;
nft_offload_update_dependency(ctx, &priv->data, reg->len);
return 0;
}
static int nft_cmp_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_cmp_expr *priv = nft_expr_priv(expr);
return __nft_cmp_offload(ctx, flow, priv);
}
static const struct nft_expr_ops nft_cmp_ops = {
.type = &nft_cmp_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
.eval = nft_cmp_eval,
.init = nft_cmp_init,
.dump = nft_cmp_dump,
.reduce = NFT_REDUCE_READONLY,
.offload = nft_cmp_offload,
};
/* Calculate the mask for the nft_cmp_fast expression. On big endian the
* mask needs to include the *upper* bytes when interpreting that data as
* something smaller than the full u32, therefore a cpu_to_le32 is done.
*/
static u32 nft_cmp_fast_mask(unsigned int len)
{
__le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
data) * BITS_PER_BYTE - len));
return (__force u32)mask;
}
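/* E.g. for len = 16: ~0U >> 16 = 0x0000ffff; on little endian the
* cpu_to_le32() is a no-op, on big endian it yields 0xffff0000 - in both
* cases the mask covers the first two bytes in memory, where the data to
* compare lives.
*/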
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
struct nft_data data;
struct nft_data_desc desc = {
.type = NFT_DATA_VALUE,
.size = sizeof(data),
};
int err;
err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
if (err < 0)
return err;
err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
desc.len *= BITS_PER_BYTE;
priv->mask = nft_cmp_fast_mask(desc.len);
priv->data = data.data[0] & priv->mask;
priv->len = desc.len;
priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
return 0;
}
static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
struct nft_cmp_expr cmp = {
.data = {
.data = {
[0] = priv->data,
},
},
.sreg = priv->sreg,
.len = priv->len / BITS_PER_BYTE,
.op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
};
return __nft_cmp_offload(ctx, flow, &cmp);
}
static int nft_cmp_fast_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
struct nft_data data;
if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
goto nla_put_failure;
data.data[0] = priv->data;
if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
const struct nft_expr_ops nft_cmp_fast_ops = {
.type = &nft_cmp_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
.eval = NULL, /* inlined */
.init = nft_cmp_fast_init,
.dump = nft_cmp_fast_dump,
.reduce = NFT_REDUCE_READONLY,
.offload = nft_cmp_fast_offload,
};
static u32 nft_cmp_mask(u32 bitlen)
{
return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
}
static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
{
int len = bitlen / BITS_PER_BYTE;
int i, words = len / sizeof(u32);
for (i = 0; i < words; i++) {
data->data[i] = 0xffffffff;
bitlen -= sizeof(u32) * BITS_PER_BYTE;
}
if (len % sizeof(u32))
data->data[i++] = nft_cmp_mask(bitlen);
for (; i < 4; i++)
data->data[i] = 0;
}
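/* E.g. a 6-byte (48-bit) key yields data[0] = 0xffffffff, data[1] set
* to the 16-bit mask from nft_cmp_mask(), and data[2] = data[3] = 0.
*/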
static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc = {
.type = NFT_DATA_VALUE,
.size = sizeof(priv->data),
};
int err;
err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
if (err < 0)
return err;
err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
priv->len = desc.len;
return 0;
}
static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
struct nft_cmp_expr cmp = {
.data = priv->data,
.sreg = priv->sreg,
.len = priv->len,
.op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
};
return __nft_cmp_offload(ctx, flow, &cmp);
}
static int nft_cmp16_fast_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
goto nla_put_failure;
if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
NFT_DATA_VALUE, priv->len) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
const struct nft_expr_ops nft_cmp16_fast_ops = {
.type = &nft_cmp_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
.eval = NULL, /* inlined */
.init = nft_cmp16_fast_init,
.dump = nft_cmp16_fast_dump,
.reduce = NFT_REDUCE_READONLY,
.offload = nft_cmp16_fast_offload,
};
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
struct nft_data data;
struct nft_data_desc desc = {
.type = NFT_DATA_VALUE,
.size = sizeof(data),
};
enum nft_cmp_ops op;
u8 sreg;
int err;
if (tb[NFTA_CMP_SREG] == NULL ||
tb[NFTA_CMP_OP] == NULL ||
tb[NFTA_CMP_DATA] == NULL)
return ERR_PTR(-EINVAL);
op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
switch (op) {
case NFT_CMP_EQ:
case NFT_CMP_NEQ:
case NFT_CMP_LT:
case NFT_CMP_LTE:
case NFT_CMP_GT:
case NFT_CMP_GTE:
break;
default:
return ERR_PTR(-EINVAL);
}
err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
if (err < 0)
return ERR_PTR(err);
sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
if (desc.len <= sizeof(u32))
return &nft_cmp_fast_ops;
else if (desc.len <= sizeof(data) &&
((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
(sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
return &nft_cmp16_fast_ops;
}
return &nft_cmp_ops;
}
struct nft_expr_type nft_cmp_type __read_mostly = {
.name = "cmp",
.select_ops = nft_cmp_select_ops,
.policy = nft_cmp_policy,
.maxattr = NFTA_CMP_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_cmp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2003+ Evgeniy Polyakov <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/if.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
#include <linux/netfilter/xt_osf.h>
static bool
xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
{
return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
xt_out(p), p->matchinfo, xt_net(p), nf_osf_fingers);
}
static struct xt_match xt_osf_match = {
.name = "osf",
.revision = 0,
.family = NFPROTO_IPV4,
.proto = IPPROTO_TCP,
.hooks = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_FORWARD),
.match = xt_osf_match_packet,
.matchsize = sizeof(struct xt_osf_info),
.me = THIS_MODULE,
};
static int __init xt_osf_init(void)
{
int err;
err = xt_register_match(&xt_osf_match);
if (err) {
pr_err("Failed to register OS fingerprint "
"matching module (%d)\n", err);
return err;
}
return 0;
}
static void __exit xt_osf_fini(void)
{
xt_unregister_match(&xt_osf_match);
}
module_init(xt_osf_init);
module_exit(xt_osf_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("Passive OS fingerprint matching.");
MODULE_ALIAS("ipt_osf");
MODULE_ALIAS("ip6t_osf");
| linux-master | net/netfilter/xt_osf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
static void nft_reject_inet_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nf_send_unreach(pkt->skb, priv->icmp_code,
nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
nf_send_reset(nft_net(pkt), nft_sk(pkt),
pkt->skb, nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nf_send_unreach(pkt->skb,
nft_reject_icmp_code(priv->icmp_code),
nft_hook(pkt));
break;
}
break;
case NFPROTO_IPV6:
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nf_send_unreach6(nft_net(pkt), pkt->skb,
priv->icmp_code, nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
nf_send_reset6(nft_net(pkt), nft_sk(pkt),
pkt->skb, nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nf_send_unreach6(nft_net(pkt), pkt->skb,
nft_reject_icmpv6_code(priv->icmp_code),
nft_hook(pkt));
break;
}
break;
}
regs->verdict.code = NF_DROP;
}
static int nft_reject_inet_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_INGRESS));
}
static struct nft_expr_type nft_reject_inet_type;
static const struct nft_expr_ops nft_reject_inet_ops = {
.type = &nft_reject_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_inet_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_inet_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_reject_inet_type __read_mostly = {
.family = NFPROTO_INET,
.name = "reject",
.ops = &nft_reject_inet_ops,
.policy = nft_reject_policy,
.maxattr = NFTA_REJECT_MAX,
.owner = THIS_MODULE,
};
static int __init nft_reject_inet_module_init(void)
{
return nft_register_expr(&nft_reject_inet_type);
}
static void __exit nft_reject_inet_module_exit(void)
{
nft_unregister_expr(&nft_reject_inet_type);
}
module_init(nft_reject_inet_module_init);
module_exit(nft_reject_inet_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
MODULE_DESCRIPTION("Netfilter nftables reject inet support");
| linux-master | net/netfilter/nft_reject_inet.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel module to match IPComp parameters for IPv4 and IPv6
*
* Copyright (C) 2013 WindRiver
*
* Author:
* Fan Du <[email protected]>
*
* Based on:
* net/netfilter/xt_esp.c
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/netfilter/xt_ipcomp.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fan Du <[email protected]>");
MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
MODULE_ALIAS("ipt_ipcomp");
MODULE_ALIAS("ip6t_ipcomp");
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
static bool comp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ip_comp_hdr _comphdr;
const struct ip_comp_hdr *chdr;
const struct xt_ipcomp *compinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
chdr = skb_header_pointer(skb, par->thoff, sizeof(_comphdr), &_comphdr);
if (chdr == NULL) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
pr_debug("Dropping evil IPComp tinygram.\n");
par->hotdrop = true;
return false;
}
return spi_match(compinfo->spis[0], compinfo->spis[1],
ntohs(chdr->cpi),
!!(compinfo->invflags & XT_IPCOMP_INV_SPI));
}
static int comp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_ipcomp *compinfo = par->matchinfo;
/* Must specify no unknown invflags */
if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) {
pr_info_ratelimited("unknown flags %X\n", compinfo->invflags);
return -EINVAL;
}
return 0;
}
static struct xt_match comp_mt_reg[] __read_mostly = {
{
.name = "ipcomp",
.family = NFPROTO_IPV4,
.match = comp_mt,
.matchsize = sizeof(struct xt_ipcomp),
.proto = IPPROTO_COMP,
.checkentry = comp_mt_check,
.me = THIS_MODULE,
},
{
.name = "ipcomp",
.family = NFPROTO_IPV6,
.match = comp_mt,
.matchsize = sizeof(struct xt_ipcomp),
.proto = IPPROTO_COMP,
.checkentry = comp_mt_check,
.me = THIS_MODULE,
},
};
static int __init comp_mt_init(void)
{
return xt_register_matches(comp_mt_reg, ARRAY_SIZE(comp_mt_reg));
}
static void __exit comp_mt_exit(void)
{
xt_unregister_matches(comp_mt_reg, ARRAY_SIZE(comp_mt_reg));
}
module_init(comp_mt_init);
module_exit(comp_mt_exit);
| linux-master | net/netfilter/xt_ipcomp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (C) 2012 Pablo Neira Ayuso <[email protected]>
*
* This software has been sponsored by Vyatta Inc. <http://www.vyatta.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/capability.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/netfilter/nfnetlink_cthelper.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
struct nfnl_cthelper {
struct list_head list;
struct nf_conntrack_helper helper;
};
static LIST_HEAD(nfnl_cthelper_list);
static int
nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
const struct nf_conn_help *help;
struct nf_conntrack_helper *helper;
help = nfct_help(ct);
if (help == NULL)
return NF_DROP;
/* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(help->helper);
if (helper == NULL)
return NF_DROP;
/* This is a user-space helper not yet configured, skip. */
if ((helper->flags &
(NF_CT_HELPER_F_USERSPACE | NF_CT_HELPER_F_CONFIGURED)) ==
NF_CT_HELPER_F_USERSPACE)
return NF_ACCEPT;
/* If the user-space helper is not available, don't block traffic. */
return NF_QUEUE_NR(helper->queue_num) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}
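/*
 * Editor's note (illustrative): the verdict returned above packs the
 * nfqueue number into the upper 16 bits of the verdict word:
 *
 *   NF_QUEUE_NR(q) == (((q) << 16) & NF_VERDICT_QMASK) | NF_QUEUE
 *
 * NF_VERDICT_FLAG_QUEUE_BYPASS makes a missing userspace listener on
 * that queue result in NF_ACCEPT rather than a packet drop.
 */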
static const struct nla_policy nfnl_cthelper_tuple_pol[NFCTH_TUPLE_MAX+1] = {
[NFCTH_TUPLE_L3PROTONUM] = { .type = NLA_U16, },
[NFCTH_TUPLE_L4PROTONUM] = { .type = NLA_U8, },
};
static int
nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
const struct nlattr *attr)
{
int err;
struct nlattr *tb[NFCTH_TUPLE_MAX+1];
err = nla_parse_nested_deprecated(tb, NFCTH_TUPLE_MAX, attr,
nfnl_cthelper_tuple_pol, NULL);
if (err < 0)
return err;
if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
return -EINVAL;
/* Not all fields are initialized so first zero the tuple */
memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
return 0;
}
static int
nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
{
struct nf_conn_help *help = nfct_help(ct);
const struct nf_conntrack_helper *helper;
if (attr == NULL)
return -EINVAL;
helper = rcu_dereference(help->helper);
if (!helper || helper->data_len == 0)
return -EINVAL;
nla_memcpy(help->data, attr, sizeof(help->data));
return 0;
}
static int
nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct)
{
const struct nf_conn_help *help = nfct_help(ct);
const struct nf_conntrack_helper *helper;
helper = rcu_dereference(help->helper);
if (helper && helper->data_len &&
nla_put(skb, CTA_HELP_INFO, helper->data_len, &help->data))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy nfnl_cthelper_expect_pol[NFCTH_POLICY_MAX+1] = {
[NFCTH_POLICY_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN-1 },
[NFCTH_POLICY_EXPECT_MAX] = { .type = NLA_U32, },
[NFCTH_POLICY_EXPECT_TIMEOUT] = { .type = NLA_U32, },
};
static int
nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
const struct nlattr *attr)
{
int err;
struct nlattr *tb[NFCTH_POLICY_MAX+1];
err = nla_parse_nested_deprecated(tb, NFCTH_POLICY_MAX, attr,
nfnl_cthelper_expect_pol, NULL);
if (err < 0)
return err;
if (!tb[NFCTH_POLICY_NAME] ||
!tb[NFCTH_POLICY_EXPECT_MAX] ||
!tb[NFCTH_POLICY_EXPECT_TIMEOUT])
return -EINVAL;
nla_strscpy(expect_policy->name,
tb[NFCTH_POLICY_NAME], NF_CT_HELPER_NAME_LEN);
expect_policy->max_expected =
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
return -EINVAL;
expect_policy->timeout =
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
return 0;
}
static const struct nla_policy
nfnl_cthelper_expect_policy_set[NFCTH_POLICY_SET_MAX+1] = {
[NFCTH_POLICY_SET_NUM] = { .type = NLA_U32, },
};
static int
nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
const struct nlattr *attr)
{
int i, ret;
struct nf_conntrack_expect_policy *expect_policy;
struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
unsigned int class_max;
ret = nla_parse_nested_deprecated(tb, NFCTH_POLICY_SET_MAX, attr,
nfnl_cthelper_expect_policy_set,
NULL);
if (ret < 0)
return ret;
if (!tb[NFCTH_POLICY_SET_NUM])
return -EINVAL;
class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
if (class_max == 0)
return -EINVAL;
if (class_max > NF_CT_MAX_EXPECT_CLASSES)
return -EOVERFLOW;
expect_policy = kcalloc(class_max,
sizeof(struct nf_conntrack_expect_policy),
GFP_KERNEL);
if (expect_policy == NULL)
return -ENOMEM;
for (i = 0; i < class_max; i++) {
if (!tb[NFCTH_POLICY_SET+i])
goto err;
ret = nfnl_cthelper_expect_policy(&expect_policy[i],
tb[NFCTH_POLICY_SET+i]);
if (ret < 0)
goto err;
}
helper->expect_class_max = class_max - 1;
helper->expect_policy = expect_policy;
return 0;
err:
kfree(expect_policy);
return -EINVAL;
}
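/*
 * Editor's note: userspace sends the number of policy classes in
 * NFCTH_POLICY_SET_NUM, while expect_class_max stores the highest valid
 * index, hence the "class_max - 1" above. A helper with two expectation
 * classes is thus created with NFCTH_POLICY_SET_NUM == 2 and ends up
 * with expect_class_max == 1.
 */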
static int
nfnl_cthelper_create(const struct nlattr * const tb[],
struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_helper *helper;
struct nfnl_cthelper *nfcth;
unsigned int size;
int ret;
if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
return -EINVAL;
nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
if (nfcth == NULL)
return -ENOMEM;
helper = &nfcth->helper;
ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
if (ret < 0)
goto err1;
nla_strscpy(helper->name,
tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
if (size > sizeof_field(struct nf_conn_help, data)) {
ret = -ENOMEM;
goto err2;
}
helper->data_len = size;
helper->flags |= NF_CT_HELPER_F_USERSPACE;
memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
helper->me = THIS_MODULE;
helper->help = nfnl_userspace_cthelper;
helper->from_nlattr = nfnl_cthelper_from_nlattr;
helper->to_nlattr = nfnl_cthelper_to_nlattr;
	/* Default to queue number zero; this can be updated at any time. */
if (tb[NFCTH_QUEUE_NUM])
helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
if (tb[NFCTH_STATUS]) {
int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
		switch (status) {
case NFCT_HELPER_STATUS_ENABLED:
helper->flags |= NF_CT_HELPER_F_CONFIGURED;
break;
case NFCT_HELPER_STATUS_DISABLED:
helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
break;
}
}
ret = nf_conntrack_helper_register(helper);
if (ret < 0)
goto err2;
list_add_tail(&nfcth->list, &nfnl_cthelper_list);
return 0;
err2:
kfree(helper->expect_policy);
err1:
kfree(nfcth);
return ret;
}
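/*
 * Usage sketch (editor's addition): a helper is typically declared from
 * userspace with conntrack-tools, e.g.
 *
 *   nfct helper add ftp inet tcp
 *
 * which sends an NFNL_MSG_CTHELPER_NEW message carrying NFCTH_NAME,
 * NFCTH_TUPLE, NFCTH_POLICY and NFCTH_PRIV_DATA_LEN and lands here.
 */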
static int
nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
struct nf_conntrack_expect_policy *new_policy,
const struct nlattr *attr)
{
struct nlattr *tb[NFCTH_POLICY_MAX + 1];
int err;
err = nla_parse_nested_deprecated(tb, NFCTH_POLICY_MAX, attr,
nfnl_cthelper_expect_pol, NULL);
if (err < 0)
return err;
if (!tb[NFCTH_POLICY_NAME] ||
!tb[NFCTH_POLICY_EXPECT_MAX] ||
!tb[NFCTH_POLICY_EXPECT_TIMEOUT])
return -EINVAL;
if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
return -EBUSY;
new_policy->max_expected =
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
if (new_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
return -EINVAL;
new_policy->timeout =
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
return 0;
}
static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
struct nf_conntrack_helper *helper)
{
struct nf_conntrack_expect_policy *new_policy;
struct nf_conntrack_expect_policy *policy;
int i, ret = 0;
new_policy = kmalloc_array(helper->expect_class_max + 1,
sizeof(*new_policy), GFP_KERNEL);
if (!new_policy)
return -ENOMEM;
	/* Check first that all policy attributes are well-formed, so we
	 * don't leave things in an inconsistent state on errors.
	 */
for (i = 0; i < helper->expect_class_max + 1; i++) {
if (!tb[NFCTH_POLICY_SET + i]) {
ret = -EINVAL;
goto err;
}
ret = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
&new_policy[i],
tb[NFCTH_POLICY_SET + i]);
if (ret < 0)
goto err;
}
/* Now we can safely update them. */
for (i = 0; i < helper->expect_class_max + 1; i++) {
policy = (struct nf_conntrack_expect_policy *)
&helper->expect_policy[i];
		policy->max_expected = new_policy[i].max_expected;
		policy->timeout = new_policy[i].timeout;
}
err:
kfree(new_policy);
return ret;
}
static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
const struct nlattr *attr)
{
struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
unsigned int class_max;
int err;
err = nla_parse_nested_deprecated(tb, NFCTH_POLICY_SET_MAX, attr,
nfnl_cthelper_expect_policy_set,
NULL);
if (err < 0)
return err;
if (!tb[NFCTH_POLICY_SET_NUM])
return -EINVAL;
class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
if (helper->expect_class_max + 1 != class_max)
return -EBUSY;
return nfnl_cthelper_update_policy_all(tb, helper);
}
static int
nfnl_cthelper_update(const struct nlattr * const tb[],
struct nf_conntrack_helper *helper)
{
u32 size;
int ret;
if (tb[NFCTH_PRIV_DATA_LEN]) {
size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
if (size != helper->data_len)
return -EBUSY;
}
if (tb[NFCTH_POLICY]) {
ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
if (ret < 0)
return ret;
}
if (tb[NFCTH_QUEUE_NUM])
helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
if (tb[NFCTH_STATUS]) {
int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
		switch (status) {
case NFCT_HELPER_STATUS_ENABLED:
helper->flags |= NF_CT_HELPER_F_CONFIGURED;
break;
case NFCT_HELPER_STATUS_DISABLED:
helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
break;
}
}
return 0;
}
static int nfnl_cthelper_new(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const tb[])
{
const char *helper_name;
struct nf_conntrack_helper *cur, *helper = NULL;
struct nf_conntrack_tuple tuple;
struct nfnl_cthelper *nlcth;
int ret = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
return -EINVAL;
helper_name = nla_data(tb[NFCTH_NAME]);
ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
if (ret < 0)
return ret;
list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
cur = &nlcth->helper;
if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
continue;
if ((tuple.src.l3num != cur->tuple.src.l3num ||
tuple.dst.protonum != cur->tuple.dst.protonum))
continue;
if (info->nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
helper = cur;
break;
}
if (helper == NULL)
ret = nfnl_cthelper_create(tb, &tuple);
else
ret = nfnl_cthelper_update(tb, helper);
return ret;
}
static int
nfnl_cthelper_dump_tuple(struct sk_buff *skb,
struct nf_conntrack_helper *helper)
{
struct nlattr *nest_parms;
nest_parms = nla_nest_start(skb, NFCTH_TUPLE);
if (nest_parms == NULL)
goto nla_put_failure;
if (nla_put_be16(skb, NFCTH_TUPLE_L3PROTONUM,
htons(helper->tuple.src.l3num)))
goto nla_put_failure;
if (nla_put_u8(skb, NFCTH_TUPLE_L4PROTONUM, helper->tuple.dst.protonum))
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
return -1;
}
static int
nfnl_cthelper_dump_policy(struct sk_buff *skb,
struct nf_conntrack_helper *helper)
{
int i;
struct nlattr *nest_parms1, *nest_parms2;
nest_parms1 = nla_nest_start(skb, NFCTH_POLICY);
if (nest_parms1 == NULL)
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
htonl(helper->expect_class_max + 1)))
goto nla_put_failure;
for (i = 0; i < helper->expect_class_max + 1; i++) {
nest_parms2 = nla_nest_start(skb, (NFCTH_POLICY_SET + i));
if (nest_parms2 == NULL)
goto nla_put_failure;
if (nla_put_string(skb, NFCTH_POLICY_NAME,
helper->expect_policy[i].name))
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_MAX,
htonl(helper->expect_policy[i].max_expected)))
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_TIMEOUT,
htonl(helper->expect_policy[i].timeout)))
goto nla_put_failure;
nla_nest_end(skb, nest_parms2);
}
nla_nest_end(skb, nest_parms1);
return 0;
nla_put_failure:
return -1;
}
static int
nfnl_cthelper_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int event, struct nf_conntrack_helper *helper)
{
struct nlmsghdr *nlh;
unsigned int flags = portid ? NLM_F_MULTI : 0;
int status;
event = nfnl_msg_type(NFNL_SUBSYS_CTHELPER, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
if (nla_put_string(skb, NFCTH_NAME, helper->name))
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_QUEUE_NUM, htonl(helper->queue_num)))
goto nla_put_failure;
if (nfnl_cthelper_dump_tuple(skb, helper) < 0)
goto nla_put_failure;
if (nfnl_cthelper_dump_policy(skb, helper) < 0)
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_PRIV_DATA_LEN, htonl(helper->data_len)))
goto nla_put_failure;
if (helper->flags & NF_CT_HELPER_F_CONFIGURED)
status = NFCT_HELPER_STATUS_ENABLED;
else
status = NFCT_HELPER_STATUS_DISABLED;
if (nla_put_be32(skb, NFCTH_STATUS, htonl(status)))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int
nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nf_conntrack_helper *cur, *last;
rcu_read_lock();
last = (struct nf_conntrack_helper *)cb->args[1];
for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
restart:
hlist_for_each_entry_rcu(cur,
&nf_ct_helper_hash[cb->args[0]], hnode) {
/* skip non-userspace conntrack helpers. */
if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
continue;
if (cb->args[1]) {
if (cur != last)
continue;
cb->args[1] = 0;
}
if (nfnl_cthelper_fill_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
NFNL_MSG_CTHELPER_NEW, cur) < 0) {
cb->args[1] = (unsigned long)cur;
goto out;
}
}
}
if (cb->args[1]) {
cb->args[1] = 0;
goto restart;
}
out:
rcu_read_unlock();
return skb->len;
}
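/*
 * Editor's note: like other netlink dumpers this function may be called
 * several times for a single dump. cb->args[0] remembers the current
 * hash bucket and cb->args[1] the helper that no longer fit into the
 * skb, so the next invocation resumes at that entry instead of starting
 * over.
 */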
static int nfnl_cthelper_get(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const tb[])
{
int ret = -ENOENT;
struct nf_conntrack_helper *cur;
struct sk_buff *skb2;
char *helper_name = NULL;
struct nf_conntrack_tuple tuple;
struct nfnl_cthelper *nlcth;
bool tuple_set = false;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = nfnl_cthelper_dump_table,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
if (tb[NFCTH_NAME])
helper_name = nla_data(tb[NFCTH_NAME]);
if (tb[NFCTH_TUPLE]) {
ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
if (ret < 0)
return ret;
tuple_set = true;
}
list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
cur = &nlcth->helper;
if (helper_name &&
strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
continue;
if (tuple_set &&
(tuple.src.l3num != cur->tuple.src.l3num ||
tuple.dst.protonum != cur->tuple.dst.protonum))
continue;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
ret = -ENOMEM;
break;
}
ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type),
NFNL_MSG_CTHELPER_NEW, cur);
if (ret <= 0) {
kfree_skb(skb2);
break;
}
ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
break;
}
return ret;
}
static int nfnl_cthelper_del(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const tb[])
{
char *helper_name = NULL;
struct nf_conntrack_helper *cur;
struct nf_conntrack_tuple tuple;
bool tuple_set = false, found = false;
struct nfnl_cthelper *nlcth, *n;
int j = 0, ret;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (tb[NFCTH_NAME])
helper_name = nla_data(tb[NFCTH_NAME]);
if (tb[NFCTH_TUPLE]) {
ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
if (ret < 0)
return ret;
tuple_set = true;
}
ret = -ENOENT;
list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
cur = &nlcth->helper;
j++;
if (helper_name &&
strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
continue;
if (tuple_set &&
(tuple.src.l3num != cur->tuple.src.l3num ||
tuple.dst.protonum != cur->tuple.dst.protonum))
continue;
if (refcount_dec_if_one(&cur->refcnt)) {
found = true;
nf_conntrack_helper_unregister(cur);
kfree(cur->expect_policy);
list_del(&nlcth->list);
kfree(nlcth);
} else {
ret = -EBUSY;
}
}
	/* Make sure we return success if we flush and there are no helpers */
return (found || j == 0) ? 0 : ret;
}
static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
[NFCTH_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN-1 },
[NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
[NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
[NFCTH_STATUS] = { .type = NLA_U32, },
};
static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
[NFNL_MSG_CTHELPER_NEW] = {
.call = nfnl_cthelper_new,
.type = NFNL_CB_MUTEX,
.attr_count = NFCTH_MAX,
.policy = nfnl_cthelper_policy
},
[NFNL_MSG_CTHELPER_GET] = {
.call = nfnl_cthelper_get,
.type = NFNL_CB_MUTEX,
.attr_count = NFCTH_MAX,
.policy = nfnl_cthelper_policy
},
[NFNL_MSG_CTHELPER_DEL] = {
.call = nfnl_cthelper_del,
.type = NFNL_CB_MUTEX,
.attr_count = NFCTH_MAX,
.policy = nfnl_cthelper_policy
},
};
static const struct nfnetlink_subsystem nfnl_cthelper_subsys = {
.name = "cthelper",
.subsys_id = NFNL_SUBSYS_CTHELPER,
.cb_count = NFNL_MSG_CTHELPER_MAX,
.cb = nfnl_cthelper_cb,
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTHELPER);
static int __init nfnl_cthelper_init(void)
{
int ret;
ret = nfnetlink_subsys_register(&nfnl_cthelper_subsys);
if (ret < 0) {
pr_err("nfnl_cthelper: cannot register with nfnetlink.\n");
goto err_out;
}
return 0;
err_out:
return ret;
}
static void __exit nfnl_cthelper_exit(void)
{
struct nf_conntrack_helper *cur;
struct nfnl_cthelper *nlcth, *n;
nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
cur = &nlcth->helper;
nf_conntrack_helper_unregister(cur);
kfree(cur->expect_policy);
kfree(nlcth);
}
}
module_init(nfnl_cthelper_init);
module_exit(nfnl_cthelper_exit);
| linux-master | net/netfilter/nfnetlink_cthelper.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* x_tables core - Backend for {ip,ip6,arp}_tables
*
* Copyright (C) 2006-2006 Harald Welte <[email protected]>
* Copyright (C) 2006-2012 Patrick McHardy <[email protected]>
*
* Based on existing ip_tables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2005 Netfilter Core Team <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
#define XT_PCPU_BLOCK_SIZE 4096
#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
struct xt_template {
struct list_head list;
/* called when table is needed in the given netns */
int (*table_init)(struct net *net);
struct module *me;
/* A unique name... */
char name[XT_TABLE_MAXNAMELEN];
};
static struct list_head xt_templates[NFPROTO_NUMPROTO];
struct xt_pernet {
struct list_head tables[NFPROTO_NUMPROTO];
};
struct compat_delta {
unsigned int offset; /* offset in kernel */
int delta; /* delta in 32bit user land */
};
struct xt_af {
struct mutex mutex;
struct list_head match;
struct list_head target;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct mutex compat_mutex;
struct compat_delta *compat_tab;
unsigned int number; /* number of slots in compat_tab[] */
unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
static unsigned int xt_pernet_id __read_mostly;
static struct xt_af *xt __read_mostly;
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_UNSPEC] = "x",
[NFPROTO_IPV4] = "ip",
[NFPROTO_ARP] = "arp",
[NFPROTO_BRIDGE] = "eb",
[NFPROTO_IPV6] = "ip6",
};
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
u_int8_t af = target->family;
mutex_lock(&xt[af].mutex);
list_add(&target->list, &xt[af].target);
mutex_unlock(&xt[af].mutex);
return 0;
}
EXPORT_SYMBOL(xt_register_target);
void
xt_unregister_target(struct xt_target *target)
{
u_int8_t af = target->family;
mutex_lock(&xt[af].mutex);
list_del(&target->list);
mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);
int
xt_register_targets(struct xt_target *target, unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = xt_register_target(&target[i]);
if (err)
goto err;
}
return err;
err:
if (i > 0)
xt_unregister_targets(target, i);
return err;
}
EXPORT_SYMBOL(xt_register_targets);
void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
while (n-- > 0)
xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
int xt_register_match(struct xt_match *match)
{
u_int8_t af = match->family;
mutex_lock(&xt[af].mutex);
list_add(&match->list, &xt[af].match);
mutex_unlock(&xt[af].mutex);
return 0;
}
EXPORT_SYMBOL(xt_register_match);
void
xt_unregister_match(struct xt_match *match)
{
u_int8_t af = match->family;
mutex_lock(&xt[af].mutex);
list_del(&match->list);
mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);
int
xt_register_matches(struct xt_match *match, unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = xt_register_match(&match[i]);
if (err)
goto err;
}
return err;
err:
if (i > 0)
xt_unregister_matches(match, i);
return err;
}
EXPORT_SYMBOL(xt_register_matches);
void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
while (n-- > 0)
xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);
/*
* These are weird, but module loading must not be done with mutex
* held (since they will register), and we have to have a single
* function to use.
*/
/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
struct xt_match *m;
int err = -ENOENT;
if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
return ERR_PTR(-EINVAL);
mutex_lock(&xt[af].mutex);
list_for_each_entry(m, &xt[af].match, list) {
if (strcmp(m->name, name) == 0) {
if (m->revision == revision) {
if (try_module_get(m->me)) {
mutex_unlock(&xt[af].mutex);
return m;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC)
/* Try searching again in the family-independent list */
return xt_find_match(NFPROTO_UNSPEC, name, revision);
return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
struct xt_match *match;
if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
return ERR_PTR(-EINVAL);
match = xt_find_match(nfproto, name, revision);
if (IS_ERR(match)) {
request_module("%st_%s", xt_prefix[nfproto], name);
match = xt_find_match(nfproto, name, revision);
}
return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
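/*
 * Editor's note: the request_module() format "%st_%s" above combines the
 * family prefix with the extension name, so looking up the "ipcomp"
 * match for NFPROTO_IPV4 attempts to load "ipt_ipcomp", the alias that
 * xt_ipcomp.c advertises via MODULE_ALIAS("ipt_ipcomp").
 */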
/* Find target, grabs ref. Returns ERR_PTR() on error. */
static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
struct xt_target *t;
int err = -ENOENT;
if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
return ERR_PTR(-EINVAL);
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision == revision) {
if (try_module_get(t->me)) {
mutex_unlock(&xt[af].mutex);
return t;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC)
/* Try searching again in the family-independent list */
return xt_find_target(NFPROTO_UNSPEC, name, revision);
return ERR_PTR(err);
}
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
struct xt_target *target;
if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
return ERR_PTR(-EINVAL);
target = xt_find_target(af, name, revision);
if (IS_ERR(target)) {
request_module("%st_%s", xt_prefix[af], name);
target = xt_find_target(af, name, revision);
}
return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
static int xt_obj_to_user(u16 __user *psize, u16 size,
void __user *pname, const char *name,
u8 __user *prev, u8 rev)
{
if (put_user(size, psize))
return -EFAULT;
if (copy_to_user(pname, name, strlen(name) + 1))
return -EFAULT;
if (put_user(rev, prev))
return -EFAULT;
return 0;
}
#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \
xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \
U->u.user.name, K->u.kernel.TYPE->name, \
&U->u.user.revision, K->u.kernel.TYPE->revision)
int xt_data_to_user(void __user *dst, const void *src,
int usersize, int size, int aligned_size)
{
usersize = usersize ? : size;
if (copy_to_user(dst, src, usersize))
return -EFAULT;
if (usersize != aligned_size &&
clear_user(dst + usersize, aligned_size - usersize))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(xt_data_to_user);
#define XT_DATA_TO_USER(U, K, TYPE) \
xt_data_to_user(U->data, K->data, \
K->u.kernel.TYPE->usersize, \
K->u.kernel.TYPE->TYPE##size, \
XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
int xt_match_to_user(const struct xt_entry_match *m,
struct xt_entry_match __user *u)
{
return XT_OBJ_TO_USER(u, m, match, 0) ||
XT_DATA_TO_USER(u, m, match);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);
int xt_target_to_user(const struct xt_entry_target *t,
struct xt_entry_target __user *u)
{
return XT_OBJ_TO_USER(u, t, target, 0) ||
XT_DATA_TO_USER(u, t, target);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
const struct xt_match *m;
int have_rev = 0;
mutex_lock(&xt[af].mutex);
list_for_each_entry(m, &xt[af].match, list) {
if (strcmp(m->name, name) == 0) {
if (m->revision > *bestp)
*bestp = m->revision;
if (m->revision == revision)
have_rev = 1;
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC && !have_rev)
return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
return have_rev;
}
static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
const struct xt_target *t;
int have_rev = 0;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision > *bestp)
*bestp = t->revision;
if (t->revision == revision)
have_rev = 1;
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC && !have_rev)
return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
return have_rev;
}
/* Returns 1 if the extension exists in some revision, 0 if there is no
 * such extension at all */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
int *err)
{
int have_rev, best = -1;
if (target == 1)
have_rev = target_revfn(af, name, revision, &best);
else
have_rev = match_revfn(af, name, revision, &best);
/* Nothing at all? Return 0 to try loading module. */
if (best == -1) {
*err = -ENOENT;
return 0;
}
*err = best;
if (!have_rev)
*err = -EPROTONOSUPPORT;
return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
static const char *const inetbr_names[] = {
"PREROUTING", "INPUT", "FORWARD",
"OUTPUT", "POSTROUTING", "BROUTING",
};
static const char *const arp_names[] = {
"INPUT", "FORWARD", "OUTPUT",
};
const char *const *names;
unsigned int i, max;
char *p = buf;
bool np = false;
int res;
names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
ARRAY_SIZE(inetbr_names);
*p = '\0';
for (i = 0; i < max; ++i) {
if (!(mask & (1 << i)))
continue;
res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
if (res > 0) {
size -= res;
p += res;
}
np = true;
}
return buf;
}
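/*
 * Editor's note (illustrative): for an inet/bridge hook mask with bits
 * 0 and 2 set this produces "PREROUTING/FORWARD"; for NFPROTO_ARP the
 * shorter arp_names[] table is used instead.
 */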
/**
* xt_check_proc_name - check that name is suitable for /proc file creation
*
* @name: file name candidate
* @size: length of buffer
*
 * Some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose: it checks that the name is NUL terminated and isn't a
 * 'special' name, like "..".
*
 * returns a negative number on error or 0 if the name is usable.
*/
int xt_check_proc_name(const char *name, unsigned int size)
{
if (name[0] == '\0')
return -EINVAL;
if (strnlen(name, size) == size)
return -ENAMETOOLONG;
if (strcmp(name, ".") == 0 ||
strcmp(name, "..") == 0 ||
strchr(name, '/'))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->match->matchsize) != size &&
par->match->matchsize != -1) {
/*
* ebt_among is exempt from centralized matchsize checking
* because it uses a dynamic-size data set.
*/
pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
xt_prefix[par->family], par->match->name,
par->match->revision,
XT_ALIGN(par->match->matchsize), size);
return -EINVAL;
}
if (par->match->table != NULL &&
strcmp(par->match->table, par->table) != 0) {
pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
xt_prefix[par->family], par->match->name,
par->match->table, par->table);
return -EINVAL;
}
if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
char used[64], allow[64];
pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
xt_prefix[par->family], par->match->name,
textify_hooks(used, sizeof(used),
par->hook_mask, par->family),
textify_hooks(allow, sizeof(allow),
par->match->hooks,
par->family));
return -EINVAL;
}
if (par->match->proto && (par->match->proto != proto || inv_proto)) {
pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
xt_prefix[par->family], par->match->name,
par->match->proto);
return -EINVAL;
}
if (par->match->checkentry != NULL) {
ret = par->match->checkentry(par);
if (ret < 0)
return ret;
else if (ret > 0)
/* Flag up potential errors. */
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
/**
 * xt_check_entry_match - check that matches end before start of target
*
* @match: beginning of xt_entry_match
* @target: beginning of this rules target (alleged end of matches)
* @alignment: alignment requirement of match structures
*
* Validates that all matches add up to the beginning of the target,
* and that each match covers at least the base structure size.
*
* Return: 0 on success, negative errno on failure.
*/
static int xt_check_entry_match(const char *match, const char *target,
const size_t alignment)
{
const struct xt_entry_match *pos;
int length = target - match;
if (length == 0) /* no matches */
return 0;
pos = (struct xt_entry_match *)match;
do {
if ((unsigned long)pos % alignment)
return -EINVAL;
if (length < (int)sizeof(struct xt_entry_match))
return -EINVAL;
if (pos->u.match_size < sizeof(struct xt_entry_match))
return -EINVAL;
if (pos->u.match_size > length)
return -EINVAL;
length -= pos->u.match_size;
pos = ((void *)((char *)(pos) + (pos)->u.match_size));
} while (length > 0);
return 0;
}
/**
 * xt_check_table_hooks - check hook entry points are sane
 * @info: xt_table_info to check
 * @valid_hooks: hook entry points that we can enter from
*
* Validates that the hook entry and underflows points are set up.
*
* Return: 0 on success, negative errno on failure.
*/
int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
{
const char *err = "unsorted underflow";
unsigned int i, max_uflow, max_entry;
bool check_hooks = false;
BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
max_entry = 0;
max_uflow = 0;
for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
if (!(valid_hooks & (1 << i)))
continue;
if (info->hook_entry[i] == 0xFFFFFFFF)
return -EINVAL;
if (info->underflow[i] == 0xFFFFFFFF)
return -EINVAL;
if (check_hooks) {
if (max_uflow > info->underflow[i])
goto error;
if (max_uflow == info->underflow[i]) {
err = "duplicate underflow";
goto error;
}
if (max_entry > info->hook_entry[i]) {
err = "unsorted entry";
goto error;
}
if (max_entry == info->hook_entry[i]) {
err = "duplicate entry";
goto error;
}
}
max_entry = info->hook_entry[i];
max_uflow = info->underflow[i];
check_hooks = true;
}
return 0;
error:
pr_err_ratelimited("%s at hook %d\n", err, i);
return -EINVAL;
}
EXPORT_SYMBOL(xt_check_table_hooks);
static bool verdict_ok(int verdict)
{
if (verdict > 0)
return true;
if (verdict < 0) {
int v = -verdict - 1;
if (verdict == XT_RETURN)
return true;
switch (v) {
case NF_ACCEPT: return true;
case NF_DROP: return true;
case NF_QUEUE: return true;
default:
break;
}
return false;
}
return false;
}
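/*
 * Editor's note (illustrative): a standard-target verdict < 0 encodes a
 * netfilter verdict as -verdict - 1, e.g.
 *
 *   -NF_ACCEPT - 1 == -2  (accept)
 *   -NF_DROP - 1   == -1  (drop)
 *
 * A positive verdict is a jump offset into the rule blob, and XT_RETURN
 * pops back to the calling chain.
 */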
static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
const char *msg, unsigned int msglen)
{
return usersize == kernsize && strnlen(msg, msglen) < msglen;
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
struct xt_af *xp = &xt[af];
WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
if (WARN_ON(!xp->compat_tab))
return -ENOMEM;
if (xp->cur >= xp->number)
return -EINVAL;
if (xp->cur)
delta += xp->compat_tab[xp->cur - 1].delta;
xp->compat_tab[xp->cur].offset = offset;
xp->compat_tab[xp->cur].delta = delta;
xp->cur++;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
void xt_compat_flush_offsets(u_int8_t af)
{
WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
if (xt[af].compat_tab) {
vfree(xt[af].compat_tab);
xt[af].compat_tab = NULL;
xt[af].number = 0;
xt[af].cur = 0;
}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
struct compat_delta *tmp = xt[af].compat_tab;
int mid, left = 0, right = xt[af].cur - 1;
while (left <= right) {
mid = (left + right) >> 1;
if (offset > tmp[mid].offset)
left = mid + 1;
else if (offset < tmp[mid].offset)
right = mid - 1;
else
return mid ? tmp[mid - 1].delta : 0;
}
return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
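/*
 * Editor's note (illustrative): compat_tab[] holds cumulative deltas
 * sorted by kernel offset, so the binary search returns by how much the
 * 32-bit image has shrunk before the given rule. With stored entries
 * { {0x40, 8}, {0x80, 16} }, a jump to kernel offset 0x80 is moved down
 * by 8 bytes in the compat layout, since only the delta of earlier
 * rules applies.
 */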
int xt_compat_init_offsets(u8 af, unsigned int number)
{
size_t mem;
WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
return -EINVAL;
if (WARN_ON(xt[af].compat_tab))
return -EINVAL;
mem = sizeof(struct compat_delta) * number;
if (mem > XT_MAX_TABLE_SIZE)
return -ENOMEM;
xt[af].compat_tab = vmalloc(mem);
if (!xt[af].compat_tab)
return -ENOMEM;
xt[af].number = number;
xt[af].cur = 0;
return 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
int xt_compat_match_offset(const struct xt_match *match)
{
u_int16_t csize = match->compatsize ? : match->matchsize;
return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
unsigned int *size)
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
int off = xt_compat_match_offset(match);
u_int16_t msize = cm->u.user.match_size;
char name[sizeof(m->u.user.name)];
m = *dstptr;
memcpy(m, cm, sizeof(*cm));
if (match->compat_from_user)
match->compat_from_user(m->data, cm->data);
else
memcpy(m->data, cm->data, msize - sizeof(*cm));
msize += off;
m->u.user.match_size = msize;
strscpy(name, match->name, sizeof(name));
module_put(match->me);
strscpy_pad(m->u.user.name, name, sizeof(m->u.user.name));
*size += off;
*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \
xt_data_to_user(U->data, K->data, \
K->u.kernel.TYPE->usersize, \
C_SIZE, \
COMPAT_XT_ALIGN(C_SIZE))
int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size)
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match __user *cm = *dstptr;
int off = xt_compat_match_offset(match);
u_int16_t msize = m->u.user.match_size - off;
if (XT_OBJ_TO_USER(cm, m, match, msize))
return -EFAULT;
if (match->compat_to_user) {
if (match->compat_to_user((void __user *)cm->data, m->data))
return -EFAULT;
} else {
if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
return -EFAULT;
}
*size -= off;
*dstptr += msize;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
struct compat_xt_entry_target t;
compat_uint_t verdict;
};
struct compat_xt_error_target {
struct compat_xt_entry_target t;
char errorname[XT_FUNCTION_MAXNAMELEN];
};
int xt_compat_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset)
{
long size_of_base_struct = elems - (const char *)base;
const struct compat_xt_entry_target *t;
const char *e = base;
if (target_offset < size_of_base_struct)
return -EINVAL;
if (target_offset + sizeof(*t) > next_offset)
return -EINVAL;
t = (void *)(e + target_offset);
if (t->u.target_size < sizeof(*t))
return -EINVAL;
if (target_offset + t->u.target_size > next_offset)
return -EINVAL;
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
const struct compat_xt_standard_target *st = (const void *)t;
if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
return -EINVAL;
if (!verdict_ok(st->verdict))
return -EINVAL;
} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
const struct compat_xt_error_target *et = (const void *)t;
if (!error_tg_ok(t->u.target_size, sizeof(*et),
et->errorname, sizeof(et->errorname)))
return -EINVAL;
}
	/* compat_xt_entry_match has less strict alignment requirements but
	 * is otherwise identical. Should the padding ever differ, a compat
	 * version of xt_check_entry_match would need to be added here.
	 */
BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
return xt_check_entry_match(elems, base + target_offset,
__alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
/**
* xt_check_entry_offsets - validate arp/ip/ip6t_entry
*
* @base: pointer to arp/ip/ip6t_entry
* @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
* @target_offset: the arp/ip/ip6_t->target_offset
* @next_offset: the arp/ip/ip6_t->next_offset
*
* validates that target_offset and next_offset are sane and that all
* match sizes (if any) align with the target offset.
*
* This function does not validate the targets or matches themselves, it
* only tests that all the offsets and sizes are correct, that all
* match structures are aligned, and that the last structure ends where
* the target structure begins.
*
 * Also see xt_compat_check_entry_offsets for the CONFIG_NETFILTER_XTABLES_COMPAT version.
*
 * The arp/ip/ip6t_entry structure @base must have passed the following tests:
* - it must point to a valid memory location
* - base to base + next_offset must be accessible, i.e. not exceed allocated
* length.
*
* A well-formed entry looks like this:
*
* ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry
* e->elems[]-----' | |
* matchsize | |
* matchsize | |
* | |
* target_offset---------------------------------' |
* next_offset---------------------------------------------------'
*
* elems[]: flexible array member at end of ip(6)/arpt_entry struct.
* This is where matches (if any) and the target reside.
* target_offset: beginning of target.
* next_offset: start of the next rule; also: size of this rule.
* Since targets have a minimum size, target_offset + minlen <= next_offset.
*
* Every match stores its size, sum of sizes must not exceed target_offset.
*
* Return: 0 on success, negative errno on failure.
*/
int xt_check_entry_offsets(const void *base,
const char *elems,
unsigned int target_offset,
unsigned int next_offset)
{
long size_of_base_struct = elems - (const char *)base;
const struct xt_entry_target *t;
const char *e = base;
/* target start is within the ip/ip6/arpt_entry struct */
if (target_offset < size_of_base_struct)
return -EINVAL;
if (target_offset + sizeof(*t) > next_offset)
return -EINVAL;
t = (void *)(e + target_offset);
if (t->u.target_size < sizeof(*t))
return -EINVAL;
if (target_offset + t->u.target_size > next_offset)
return -EINVAL;
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
const struct xt_standard_target *st = (const void *)t;
if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
return -EINVAL;
if (!verdict_ok(st->verdict))
return -EINVAL;
} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
const struct xt_error_target *et = (const void *)t;
if (!error_tg_ok(t->u.target_size, sizeof(*et),
et->errorname, sizeof(et->errorname)))
return -EINVAL;
}
return xt_check_entry_match(elems, base + target_offset,
__alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
/**
* xt_alloc_entry_offsets - allocate array to store rule head offsets
*
* @size: number of entries
*
* Return: NULL or zeroed kmalloc'd or vmalloc'd array
*/
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
return NULL;
return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
/**
* xt_find_jump_offset - check if target is a valid jump offset
*
* @offsets: array containing all valid rule start offsets of a rule blob
* @target: the jump target to search for
 * @size: number of entries in @offsets
*/
bool xt_find_jump_offset(const unsigned int *offsets,
unsigned int target, unsigned int size)
{
int m, low = 0, hi = size;
while (hi > low) {
m = (low + hi) / 2u;
if (offsets[m] > target)
hi = m;
else if (offsets[m] < target)
low = m + 1;
else
return true;
}
return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->target->targetsize) != size) {
pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
xt_prefix[par->family], par->target->name,
par->target->revision,
XT_ALIGN(par->target->targetsize), size);
return -EINVAL;
}
if (par->target->table != NULL &&
strcmp(par->target->table, par->table) != 0) {
pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
xt_prefix[par->family], par->target->name,
par->target->table, par->table);
return -EINVAL;
}
if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
char used[64], allow[64];
pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
xt_prefix[par->family], par->target->name,
textify_hooks(used, sizeof(used),
par->hook_mask, par->family),
textify_hooks(allow, sizeof(allow),
par->target->hooks,
par->family));
return -EINVAL;
}
if (par->target->proto && (par->target->proto != proto || inv_proto)) {
pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
xt_prefix[par->family], par->target->name,
par->target->proto);
return -EINVAL;
}
if (par->target->checkentry != NULL) {
ret = par->target->checkentry(par);
if (ret < 0)
return ret;
else if (ret > 0)
/* Flag up potential errors. */
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
/**
* xt_copy_counters - copy counters and metadata from a sockptr_t
*
* @arg: src sockptr
* @len: alleged size of userspace memory
* @info: where to store the xt_counters_info metadata
*
 * Copies the counter metadata from @arg and stores it in @info.
*
 * vmallocs memory to hold the counters, then copies the counter data
 * from @arg to the new memory and returns a pointer to it.
*
* If called from a compat syscall, @info gets converted automatically to the
* 64bit representation.
*
 * Return: pointer that the caller has to test via IS_ERR().
* If IS_ERR is false, caller has to vfree the pointer.
*/
void *xt_copy_counters(sockptr_t arg, unsigned int len,
struct xt_counters_info *info)
{
size_t offset;
void *mem;
u64 size;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall()) {
/* structures only differ in size due to alignment */
struct compat_xt_counters_info compat_tmp;
if (len <= sizeof(compat_tmp))
return ERR_PTR(-EINVAL);
len -= sizeof(compat_tmp);
if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
return ERR_PTR(-EFAULT);
memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
info->num_counters = compat_tmp.num_counters;
offset = sizeof(compat_tmp);
} else
#endif
{
if (len <= sizeof(*info))
return ERR_PTR(-EINVAL);
len -= sizeof(*info);
if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
return ERR_PTR(-EFAULT);
offset = sizeof(*info);
}
info->name[sizeof(info->name) - 1] = '\0';
size = sizeof(struct xt_counters);
size *= info->num_counters;
if (size != (u64)len)
return ERR_PTR(-EINVAL);
mem = vmalloc(len);
if (!mem)
return ERR_PTR(-ENOMEM);
if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
return mem;
vfree(mem);
return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters);
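/*
 * Editor's note (illustrative): the sockptr blob parsed above is a
 * struct xt_counters_info header followed directly by num_counters
 * struct xt_counters entries:
 *
 *   [ name[], num_counters ][ counter 0 ][ counter 1 ] ... [ counter n-1 ]
 *
 * which is why len must equal num_counters * sizeof(struct xt_counters)
 * once the header has been subtracted.
 */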
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
u_int16_t csize = target->compatsize ? : target->targetsize;
return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
unsigned int *size)
{
const struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
int off = xt_compat_target_offset(target);
u_int16_t tsize = ct->u.user.target_size;
char name[sizeof(t->u.user.name)];
t = *dstptr;
memcpy(t, ct, sizeof(*ct));
if (target->compat_from_user)
target->compat_from_user(t->data, ct->data);
else
memcpy(t->data, ct->data, tsize - sizeof(*ct));
tsize += off;
t->u.user.target_size = tsize;
strscpy(name, target->name, sizeof(name));
module_put(target->me);
strscpy_pad(t->u.user.name, name, sizeof(t->u.user.name));
*size += off;
*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size)
{
const struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target __user *ct = *dstptr;
int off = xt_compat_target_offset(target);
u_int16_t tsize = t->u.user.target_size - off;
if (XT_OBJ_TO_USER(ct, t, target, tsize))
return -EFAULT;
if (target->compat_to_user) {
if (target->compat_to_user((void __user *)ct->data, t->data))
return -EFAULT;
} else {
if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
return -EFAULT;
}
*size -= off;
*dstptr += tsize;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
struct xt_table_info *info = NULL;
size_t sz = sizeof(*info) + size;
if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
return NULL;
info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
if (!info)
return NULL;
memset(info, 0, sizeof(*info));
info->size = size;
return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
void xt_free_table_info(struct xt_table_info *info)
{
int cpu;
if (info->jumpstack != NULL) {
for_each_possible_cpu(cpu)
kvfree(info->jumpstack[cpu]);
kvfree(info->jumpstack);
}
kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
struct xt_table *xt_find_table(struct net *net, u8 af, const char *name)
{
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
struct xt_table *t;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt_net->tables[af], list) {
if (strcmp(t->name, name) == 0) {
mutex_unlock(&xt[af].mutex);
return t;
}
}
mutex_unlock(&xt[af].mutex);
return NULL;
}
EXPORT_SYMBOL(xt_find_table);
/* Find table by name, grabs mutex & ref. Returns ERR_PTR on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name)
{
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
struct module *owner = NULL;
struct xt_template *tmpl;
struct xt_table *t;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt_net->tables[af], list)
if (strcmp(t->name, name) == 0 && try_module_get(t->me))
return t;
/* Table doesn't exist in this netns, check larval list */
list_for_each_entry(tmpl, &xt_templates[af], list) {
int err;
if (strcmp(tmpl->name, name))
continue;
if (!try_module_get(tmpl->me))
goto out;
owner = tmpl->me;
mutex_unlock(&xt[af].mutex);
err = tmpl->table_init(net);
if (err < 0) {
module_put(owner);
return ERR_PTR(err);
}
mutex_lock(&xt[af].mutex);
break;
}
/* and once again: */
list_for_each_entry(t, &xt_net->tables[af], list)
if (strcmp(t->name, name) == 0)
return t;
module_put(owner);
out:
mutex_unlock(&xt[af].mutex);
return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
const char *name)
{
struct xt_table *t = xt_find_table_lock(net, af, name);
#ifdef CONFIG_MODULES
if (IS_ERR(t)) {
int err = request_module("%stable_%s", xt_prefix[af], name);
if (err < 0)
return ERR_PTR(err);
t = xt_find_table_lock(net, af, name);
}
#endif
return t;
}
EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
void xt_table_unlock(struct xt_table *table)
{
mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
void xt_compat_lock(u_int8_t af)
{
mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);
void xt_compat_unlock(u_int8_t af)
{
mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
unsigned int size;
int cpu;
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
i->jumpstack = kvzalloc(size, GFP_KERNEL);
else
i->jumpstack = kzalloc(size, GFP_KERNEL);
if (i->jumpstack == NULL)
return -ENOMEM;
/* ruleset without jumps -- no stack needed */
if (i->stacksize == 0)
return 0;
/* Jumpstack needs to be able to record two full callchains, one
* from the first rule set traversal, plus one table reentrancy
* via -j TEE without clobbering the callchain that brought us to
* TEE target.
*
* This is done by allocating two jumpstacks per cpu, on reentry
* the upper half of the stack is used.
*
* see the jumpstack setup in ipt_do_table() for more details.
*/
size = sizeof(void *) * i->stacksize * 2u;
for_each_possible_cpu(cpu) {
i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
cpu_to_node(cpu));
if (i->jumpstack[cpu] == NULL)
/*
* Freeing will be done later on by the callers. The
* chain is: xt_replace_table -> __do_replace ->
* do_replace -> xt_free_table_info.
*/
return -ENOMEM;
}
return 0;
}
struct xt_counters *xt_counters_alloc(unsigned int counters)
{
struct xt_counters *mem;
if (counters == 0 || counters > INT_MAX / sizeof(*mem))
return NULL;
counters *= sizeof(*mem);
if (counters > XT_MAX_TABLE_SIZE)
return NULL;
return vzalloc(counters);
}
EXPORT_SYMBOL(xt_counters_alloc);
struct xt_table_info *
xt_replace_table(struct xt_table *table,
unsigned int num_counters,
struct xt_table_info *newinfo,
int *error)
{
struct xt_table_info *private;
unsigned int cpu;
int ret;
ret = xt_jumpstack_alloc(newinfo);
if (ret < 0) {
*error = ret;
return NULL;
}
/* Do the substitution. */
local_bh_disable();
private = table->private;
/* Check inside lock: is the old number correct? */
if (num_counters != private->number) {
pr_debug("num_counters != table->private->number (%u/%u)\n",
num_counters, private->number);
local_bh_enable();
*error = -EAGAIN;
return NULL;
}
newinfo->initial_entries = private->initial_entries;
/*
* Ensure contents of newinfo are visible before assigning to
* private.
*/
smp_wmb();
table->private = newinfo;
/* make sure all cpus see new ->private value */
smp_mb();
/*
* Even though table entries have now been swapped, other CPU's
* may still be using the old entries...
*/
local_bh_enable();
/* ... so wait for even xt_recseq on all cpus */
for_each_possible_cpu(cpu) {
seqcount_t *s = &per_cpu(xt_recseq, cpu);
u32 seq = raw_read_seqcount(s);
if (seq & 1) {
do {
cond_resched();
cpu_relax();
} while (seq == raw_read_seqcount(s));
}
}
audit_log_nfcfg(table->name, table->af, private->number,
!private->number ? AUDIT_XT_OP_REGISTER :
AUDIT_XT_OP_REPLACE,
GFP_KERNEL);
return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
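/*
 * Editor's note: the seqcount loop above acts as a lightweight grace
 * period. Packet processing brackets each table traversal with an
 * odd/even xt_recseq count, so once every CPU has been seen with an
 * even (or advanced) count, no CPU can still hold a reference to the
 * old private blob and it is safe to hand it back for freeing.
 */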
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *input_table,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo)
{
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
struct xt_table_info *private;
struct xt_table *t, *table;
int ret;
/* Don't add one object to multiple lists. */
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
goto out;
}
mutex_lock(&xt[table->af].mutex);
/* Don't autoload: we'd eat our tail... */
list_for_each_entry(t, &xt_net->tables[table->af], list) {
if (strcmp(t->name, table->name) == 0) {
ret = -EEXIST;
goto unlock;
}
}
/* Simplifies replace_table code. */
table->private = bootstrap;
if (!xt_replace_table(table, 0, newinfo, &ret))
goto unlock;
private = table->private;
pr_debug("table->private->number = %u\n", private->number);
/* save number of initial entries */
private->initial_entries = private->number;
list_add(&table->list, &xt_net->tables[table->af]);
mutex_unlock(&xt[table->af].mutex);
return table;
unlock:
mutex_unlock(&xt[table->af].mutex);
kfree(table);
out:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
void *xt_unregister_table(struct xt_table *table)
{
struct xt_table_info *private;
mutex_lock(&xt[table->af].mutex);
private = table->private;
list_del(&table->list);
mutex_unlock(&xt[table->af].mutex);
audit_log_nfcfg(table->name, table->af, private->number,
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
kfree(table->ops);
kfree(table);
return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
u8 af = (unsigned long)pde_data(file_inode(seq->file));
struct net *net = seq_file_net(seq);
struct xt_pernet *xt_net;
xt_net = net_generic(net, xt_pernet_id);
mutex_lock(&xt[af].mutex);
return seq_list_start(&xt_net->tables[af], *pos);
}
static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
u8 af = (unsigned long)pde_data(file_inode(seq->file));
struct net *net = seq_file_net(seq);
struct xt_pernet *xt_net;
xt_net = net_generic(net, xt_pernet_id);
return seq_list_next(v, &xt_net->tables[af], pos);
}
static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
u_int8_t af = (unsigned long)pde_data(file_inode(seq->file));
mutex_unlock(&xt[af].mutex);
}
static int xt_table_seq_show(struct seq_file *seq, void *v)
{
struct xt_table *table = list_entry(v, struct xt_table, list);
if (*table->name)
seq_printf(seq, "%s\n", table->name);
return 0;
}
static const struct seq_operations xt_table_seq_ops = {
.start = xt_table_seq_start,
.next = xt_table_seq_next,
.stop = xt_table_seq_stop,
.show = xt_table_seq_show,
};
/*
 * Traversal state for the {ip,ip6,arp,eb}_tables_{matches,targets}
 * /proc seq_files; helps with crossing the multi-AF mutexes.
 */
struct nf_mttg_trav {
struct list_head *head, *curr;
uint8_t class;
};
enum {
MTTG_TRAV_INIT,
MTTG_TRAV_NFP_UNSPEC,
MTTG_TRAV_NFP_SPEC,
MTTG_TRAV_DONE,
};
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
bool is_target)
{
static const uint8_t next_class[] = {
[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
[MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
};
uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
struct nf_mttg_trav *trav = seq->private;
if (ppos != NULL)
++(*ppos);
switch (trav->class) {
case MTTG_TRAV_INIT:
trav->class = MTTG_TRAV_NFP_UNSPEC;
mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
trav->head = trav->curr = is_target ?
&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
break;
case MTTG_TRAV_NFP_UNSPEC:
trav->curr = trav->curr->next;
if (trav->curr != trav->head)
break;
mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
mutex_lock(&xt[nfproto].mutex);
trav->head = trav->curr = is_target ?
&xt[nfproto].target : &xt[nfproto].match;
trav->class = next_class[trav->class];
break;
case MTTG_TRAV_NFP_SPEC:
trav->curr = trav->curr->next;
if (trav->curr != trav->head)
break;
fallthrough;
default:
return NULL;
}
return trav;
}
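/*
 * Illustrative note (not part of the original file): a full /proc read thus
 * walks MTTG_TRAV_INIT -> MTTG_TRAV_NFP_UNSPEC (the NFPROTO_UNSPEC list,
 * under its mutex) -> MTTG_TRAV_NFP_SPEC (the file's own protocol list,
 * under that mutex); when the second list is exhausted, the walk returns
 * NULL and xt_mttg_seq_stop() drops the remaining mutex, so at most one
 * xt[].mutex is held at any time.
 */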
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
bool is_target)
{
struct nf_mttg_trav *trav = seq->private;
unsigned int j;
trav->class = MTTG_TRAV_INIT;
for (j = 0; j < *pos; ++j)
if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
return NULL;
return trav;
}
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
struct nf_mttg_trav *trav = seq->private;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
break;
case MTTG_TRAV_NFP_SPEC:
mutex_unlock(&xt[nfproto].mutex);
break;
}
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
return xt_mttg_seq_start(seq, pos, false);
}
static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
return xt_mttg_seq_next(seq, v, ppos, false);
}
static int xt_match_seq_show(struct seq_file *seq, void *v)
{
const struct nf_mttg_trav *trav = seq->private;
const struct xt_match *match;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
case MTTG_TRAV_NFP_SPEC:
if (trav->curr == trav->head)
return 0;
match = list_entry(trav->curr, struct xt_match, list);
if (*match->name)
seq_printf(seq, "%s\n", match->name);
}
return 0;
}
static const struct seq_operations xt_match_seq_ops = {
.start = xt_match_seq_start,
.next = xt_match_seq_next,
.stop = xt_mttg_seq_stop,
.show = xt_match_seq_show,
};
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
return xt_mttg_seq_start(seq, pos, true);
}
static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
return xt_mttg_seq_next(seq, v, ppos, true);
}
static int xt_target_seq_show(struct seq_file *seq, void *v)
{
const struct nf_mttg_trav *trav = seq->private;
const struct xt_target *target;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
case MTTG_TRAV_NFP_SPEC:
if (trav->curr == trav->head)
return 0;
target = list_entry(trav->curr, struct xt_target, list);
if (*target->name)
seq_printf(seq, "%s\n", target->name);
}
return 0;
}
static const struct seq_operations xt_target_seq_ops = {
.start = xt_target_seq_start,
.next = xt_target_seq_next,
.stop = xt_mttg_seq_stop,
.show = xt_target_seq_show,
};
#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"
#endif /* CONFIG_PROC_FS */
/**
* xt_hook_ops_alloc - set up hooks for a new table
* @table: table with metadata needed to set up hooks
* @fn: Hook function
*
* This function will create the nf_hook_ops that the x_table needs
* to hand to xt_hook_link_net().
*/
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
unsigned int hook_mask = table->valid_hooks;
uint8_t i, num_hooks = hweight32(hook_mask);
uint8_t hooknum;
struct nf_hook_ops *ops;
if (!num_hooks)
return ERR_PTR(-EINVAL);
ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
hook_mask >>= 1, ++hooknum) {
if (!(hook_mask & 1))
continue;
ops[i].hook = fn;
ops[i].pf = table->af;
ops[i].hooknum = hooknum;
ops[i].priority = table->priority;
++i;
}
return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
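/*
 * Worked example (illustrative, not part of the original file): for a
 * hypothetical table with
 *
 *	valid_hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD)
 *
 * hweight32() yields num_hooks = 2, and the shift loop above fills
 * ops[0].hooknum = NF_INET_LOCAL_IN and ops[1].hooknum = NF_INET_FORWARD,
 * each entry sharing the same hook function, address family and priority.
 */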
int xt_register_template(const struct xt_table *table,
int (*table_init)(struct net *net))
{
int ret = -EEXIST, af = table->af;
struct xt_template *t;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt_templates[af], list) {
if (WARN_ON_ONCE(strcmp(table->name, t->name) == 0))
goto out_unlock;
}
ret = -ENOMEM;
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
goto out_unlock;
BUILD_BUG_ON(sizeof(t->name) != sizeof(table->name));
strscpy(t->name, table->name, sizeof(t->name));
t->table_init = table_init;
t->me = table->me;
list_add(&t->list, &xt_templates[af]);
ret = 0;
out_unlock:
mutex_unlock(&xt[af].mutex);
return ret;
}
EXPORT_SYMBOL_GPL(xt_register_template);
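/*
 * Minimal usage sketch (illustrative; all names below are hypothetical):
 *
 *	static const struct xt_table example_table = {
 *		.name		= "example",
 *		.af		= NFPROTO_IPV4,
 *		.valid_hooks	= 1 << NF_INET_LOCAL_IN,
 *		.me		= THIS_MODULE,
 *	};
 *
 *	static int __net_init example_table_init(struct net *net)
 *	{
 *		return 0;	// would build and register the per-net table
 *	}
 *
 *	err = xt_register_template(&example_table, example_table_init);
 *
 * The template records only the name, the per-netns init callback and the
 * owning module; the actual xt_table instances are created later, per
 * network namespace, via that callback.
 */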
void xt_unregister_template(const struct xt_table *table)
{
struct xt_template *t;
int af = table->af;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt_templates[af], list) {
if (strcmp(table->name, t->name))
continue;
list_del(&t->list);
mutex_unlock(&xt[af].mutex);
kfree(t);
return;
}
mutex_unlock(&xt[af].mutex);
WARN_ON_ONCE(1);
}
EXPORT_SYMBOL_GPL(xt_unregister_template);
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
char buf[XT_FUNCTION_MAXNAMELEN];
struct proc_dir_entry *proc;
kuid_t root_uid;
kgid_t root_gid;
#endif
if (af >= ARRAY_SIZE(xt_prefix))
return -EINVAL;
#ifdef CONFIG_PROC_FS
root_uid = make_kuid(net->user_ns, 0);
root_gid = make_kgid(net->user_ns, 0);
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
sizeof(struct seq_net_private),
(void *)(unsigned long)af);
if (!proc)
goto out;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
proc = proc_create_seq_private(buf, 0440, net->proc_net,
&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
(void *)(unsigned long)af);
if (!proc)
goto out_remove_tables;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
proc = proc_create_seq_private(buf, 0440, net->proc_net,
&xt_target_seq_ops, sizeof(struct nf_mttg_trav),
(void *)(unsigned long)af);
if (!proc)
goto out_remove_matches;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
#endif
return 0;
#ifdef CONFIG_PROC_FS
out_remove_matches:
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
out_remove_tables:
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
out:
return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
char buf[XT_FUNCTION_MAXNAMELEN];
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
/**
* xt_percpu_counter_alloc - allocate x_tables rule counter
*
* @state: pointer to xt_percpu allocation state
* @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
*
* On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
* contain the address of the real (percpu) counter.
*
 * Rule evaluation needs to use the xt_get_this_cpu_counter() helper
* to fetch the real percpu counter.
*
* To speed up allocation and improve data locality, a 4kb block is
* allocated. Freeing any counter may free an entire block, so all
* counters allocated using the same state must be freed at the same
* time.
*
* xt_percpu_counter_alloc_state contains the base address of the
* allocated page and the current sub-offset.
*
 * Return: false on error.
*/
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
struct xt_counters *counter)
{
BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
if (nr_cpu_ids <= 1)
return true;
if (!state->mem) {
state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
XT_PCPU_BLOCK_SIZE);
if (!state->mem)
return false;
}
counter->pcnt = (__force unsigned long)(state->mem + state->off);
state->off += sizeof(*counter);
if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
state->mem = NULL;
state->off = 0;
}
return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
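/*
 * Worked example (illustrative, not part of the original file): with the
 * usual XT_PCPU_BLOCK_SIZE of 4096 bytes and 16-byte counters, the first
 * call allocates a fresh percpu block and hands out offset 0, the next
 * call offset 16, and so on up to offset 4080; the call that hands out
 * offset 4080 pushes state->off past the limit and resets state->mem, so
 * the next call allocates a new block. Only the counter carved at offset
 * 0 of each block has an XT_PCPU_BLOCK_SIZE-aligned address, which is
 * exactly the test xt_percpu_counter_free() below uses to decide when to
 * free_percpu() the whole block.
 */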
void xt_percpu_counter_free(struct xt_counters *counters)
{
unsigned long pcnt = counters->pcnt;
if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
static int __net_init xt_net_init(struct net *net)
{
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
int i;
for (i = 0; i < NFPROTO_NUMPROTO; i++)
INIT_LIST_HEAD(&xt_net->tables[i]);
return 0;
}
static void __net_exit xt_net_exit(struct net *net)
{
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
int i;
for (i = 0; i < NFPROTO_NUMPROTO; i++)
WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
}
static struct pernet_operations xt_net_ops = {
.init = xt_net_init,
.exit = xt_net_exit,
.id = &xt_pernet_id,
.size = sizeof(struct xt_pernet),
};
static int __init xt_init(void)
{
unsigned int i;
int rv;
for_each_possible_cpu(i) {
seqcount_init(&per_cpu(xt_recseq, i));
}
xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
if (!xt)
return -ENOMEM;
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
mutex_init(&xt[i].mutex);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
mutex_init(&xt[i].compat_mutex);
xt[i].compat_tab = NULL;
#endif
INIT_LIST_HEAD(&xt[i].target);
INIT_LIST_HEAD(&xt[i].match);
INIT_LIST_HEAD(&xt_templates[i]);
}
rv = register_pernet_subsys(&xt_net_ops);
if (rv < 0)
kfree(xt);
return rv;
}
static void __exit xt_fini(void)
{
unregister_pernet_subsys(&xt_net_ops);
kfree(xt);
}
module_init(xt_init);
module_exit(xt_fini);
| linux-master | net/netfilter/x_tables.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Xtables module to match packets using a BPF filter.
* Copyright 2013 Google Inc.
* Written by Willem de Bruijn <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/netfilter/xt_bpf.h>
#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Willem de Bruijn <[email protected]>");
MODULE_DESCRIPTION("Xtables: BPF filter match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_bpf");
MODULE_ALIAS("ip6t_bpf");
static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
struct bpf_prog **ret)
{
struct sock_fprog_kern program;
if (len > XT_BPF_MAX_NUM_INSTR)
return -EINVAL;
program.len = len;
program.filter = insns;
if (bpf_prog_create(ret, &program)) {
pr_info_ratelimited("check failed: parse error\n");
return -EINVAL;
}
return 0;
}
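/*
 * Illustrative sketch (not part of the original file): the smallest
 * bytecode a caller could hand to this checker is a single "accept
 * everything" classic BPF instruction:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	struct bpf_prog *prog;
 *	int err = __bpf_mt_check_bytecode(insns, ARRAY_SIZE(insns), &prog);
 *
 * bpf_prog_create() validates the program; any parse failure is mapped
 * to -EINVAL here instead of propagating the raw error code.
 */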
static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
{
struct bpf_prog *prog;
prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
if (IS_ERR(prog))
return PTR_ERR(prog);
*ret = prog;
return 0;
}
static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
{
if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
return -EINVAL;
*ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
return PTR_ERR_OR_ZERO(*ret);
}
static int bpf_mt_check(const struct xt_mtchk_param *par)
{
struct xt_bpf_info *info = par->matchinfo;
return __bpf_mt_check_bytecode(info->bpf_program,
info->bpf_program_num_elem,
&info->filter);
}
static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
{
struct xt_bpf_info_v1 *info = par->matchinfo;
if (info->mode == XT_BPF_MODE_BYTECODE)
return __bpf_mt_check_bytecode(info->bpf_program,
info->bpf_program_num_elem,
&info->filter);
else if (info->mode == XT_BPF_MODE_FD_ELF)
return __bpf_mt_check_fd(info->fd, &info->filter);
else if (info->mode == XT_BPF_MODE_PATH_PINNED)
return __bpf_mt_check_path(info->path, &info->filter);
else
return -EINVAL;
}
static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_bpf_info *info = par->matchinfo;
return bpf_prog_run(info->filter, skb);
}
static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_bpf_info_v1 *info = par->matchinfo;
return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb);
}
static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_bpf_info *info = par->matchinfo;
bpf_prog_destroy(info->filter);
}
static void bpf_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
const struct xt_bpf_info_v1 *info = par->matchinfo;
bpf_prog_destroy(info->filter);
}
static struct xt_match bpf_mt_reg[] __read_mostly = {
{
.name = "bpf",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = bpf_mt_check,
.match = bpf_mt,
.destroy = bpf_mt_destroy,
.matchsize = sizeof(struct xt_bpf_info),
.usersize = offsetof(struct xt_bpf_info, filter),
.me = THIS_MODULE,
},
{
.name = "bpf",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = bpf_mt_check_v1,
.match = bpf_mt_v1,
.destroy = bpf_mt_destroy_v1,
.matchsize = sizeof(struct xt_bpf_info_v1),
.usersize = offsetof(struct xt_bpf_info_v1, filter),
.me = THIS_MODULE,
},
};
static int __init bpf_mt_init(void)
{
return xt_register_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}
static void __exit bpf_mt_exit(void)
{
xt_unregister_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}
module_init(bpf_mt_init);
module_exit(bpf_mt_exit);
| linux-master | net/netfilter/xt_bpf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xt_ipvs - kernel module to match IPVS connection properties
*
* Author: Hannes Eder <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
#endif
#include <linux/ip_vs.h>
#include <linux/types.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_ipvs.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/ip_vs.h>
MODULE_AUTHOR("Hannes Eder <[email protected]>");
MODULE_DESCRIPTION("Xtables: match IPVS connection properties");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_ipvs");
MODULE_ALIAS("ip6t_ipvs");
/* borrowed from xt_conntrack */
static bool ipvs_mt_addrcmp(const union nf_inet_addr *kaddr,
const union nf_inet_addr *uaddr,
const union nf_inet_addr *umask,
unsigned int l3proto)
{
if (l3proto == NFPROTO_IPV4)
return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0;
#ifdef CONFIG_IP_VS_IPV6
else if (l3proto == NFPROTO_IPV6)
return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6,
&uaddr->in6) == 0;
#endif
else
return false;
}
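/*
 * Worked example (illustrative): for IPv4, a rule matching 10.0.0.0/8
 * stores uaddr->ip = 10.0.0.0 and umask->ip = 255.0.0.0 (network byte
 * order), so a connection address of 10.1.2.3 gives
 * ((kaddr ^ uaddr) & umask) == 0 and the helper returns true, while
 * 192.168.0.1 leaves bits set in the first octet and it returns false.
 */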
static bool
ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_ipvs_mtinfo *data = par->matchinfo;
struct netns_ipvs *ipvs = net_ipvs(xt_net(par));
/* ipvs_mt_check ensures that family is only NFPROTO_IPV[46]. */
const u_int8_t family = xt_family(par);
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
struct ip_vs_conn *cp;
bool match = true;
if (data->bitmask == XT_IPVS_IPVS_PROPERTY) {
match = skb->ipvs_property ^
!!(data->invert & XT_IPVS_IPVS_PROPERTY);
goto out;
}
	/* flags other than XT_IPVS_IPVS_PROPERTY are set */
if (!skb->ipvs_property) {
match = false;
goto out;
}
ip_vs_fill_iph_skb(family, skb, true, &iph);
if (data->bitmask & XT_IPVS_PROTO)
if ((iph.protocol == data->l4proto) ^
!(data->invert & XT_IPVS_PROTO)) {
match = false;
goto out;
}
pp = ip_vs_proto_get(iph.protocol);
if (unlikely(!pp)) {
match = false;
goto out;
}
/*
* Check if the packet belongs to an existing entry
*/
cp = pp->conn_out_get(ipvs, family, skb, &iph);
if (unlikely(cp == NULL)) {
match = false;
goto out;
}
/*
	 * We found a connection, i.e. cp != NULL, make sure to call
	 * __ip_vs_conn_put before returning. In our case jump to out_put_cp.
*/
if (data->bitmask & XT_IPVS_VPORT)
if ((cp->vport == data->vport) ^
!(data->invert & XT_IPVS_VPORT)) {
match = false;
goto out_put_cp;
}
if (data->bitmask & XT_IPVS_VPORTCTL)
if ((cp->control != NULL &&
cp->control->vport == data->vportctl) ^
!(data->invert & XT_IPVS_VPORTCTL)) {
match = false;
goto out_put_cp;
}
if (data->bitmask & XT_IPVS_DIR) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL) {
match = false;
goto out_put_cp;
}
if ((ctinfo >= IP_CT_IS_REPLY) ^
!!(data->invert & XT_IPVS_DIR)) {
match = false;
goto out_put_cp;
}
}
if (data->bitmask & XT_IPVS_METHOD)
if (((cp->flags & IP_VS_CONN_F_FWD_MASK) == data->fwd_method) ^
!(data->invert & XT_IPVS_METHOD)) {
match = false;
goto out_put_cp;
}
if (data->bitmask & XT_IPVS_VADDR) {
if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr,
&data->vmask, family) ^
!(data->invert & XT_IPVS_VADDR)) {
match = false;
goto out_put_cp;
}
}
out_put_cp:
__ip_vs_conn_put(cp);
out:
pr_debug("match=%d\n", match);
return match;
}
static int ipvs_mt_check(const struct xt_mtchk_param *par)
{
if (par->family != NFPROTO_IPV4
#ifdef CONFIG_IP_VS_IPV6
&& par->family != NFPROTO_IPV6
#endif
) {
pr_info_ratelimited("protocol family %u not supported\n",
par->family);
return -EINVAL;
}
return 0;
}
static struct xt_match xt_ipvs_mt_reg __read_mostly = {
.name = "ipvs",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = ipvs_mt,
.checkentry = ipvs_mt_check,
.matchsize = XT_ALIGN(sizeof(struct xt_ipvs_mtinfo)),
.me = THIS_MODULE,
};
static int __init ipvs_mt_init(void)
{
return xt_register_match(&xt_ipvs_mt_reg);
}
static void __exit ipvs_mt_exit(void)
{
xt_unregister_match(&xt_ipvs_mt_reg);
}
module_init(ipvs_mt_init);
module_exit(ipvs_mt_exit);
| linux-master | net/netfilter/xt_ipvs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (C) 2011 Pablo Neira Ayuso <[email protected]>
* (C) 2011 Intra2net AG <https://www.intra2net.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nfnetlink_acct.h>
#include <linux/netfilter/xt_nfacct.h>
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_DESCRIPTION("Xtables: match for the extended accounting infrastructure");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_nfacct");
MODULE_ALIAS("ip6t_nfacct");
static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
int overquota;
const struct xt_nfacct_match_info *info = par->targinfo;
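	/* Note: ->targinfo and ->matchinfo share a union inside
	 * struct xt_action_param, so this historical use of targinfo
	 * from a match is harmless.
	 */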
nfnl_acct_update(skb, info->nfacct);
overquota = nfnl_acct_overquota(xt_net(par), info->nfacct);
return overquota != NFACCT_UNDERQUOTA;
}
static int
nfacct_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_nfacct_match_info *info = par->matchinfo;
struct nf_acct *nfacct;
nfacct = nfnl_acct_find_get(par->net, info->name);
if (nfacct == NULL) {
pr_info_ratelimited("accounting object `%s' does not exists\n",
info->name);
return -ENOENT;
}
info->nfacct = nfacct;
return 0;
}
static void
nfacct_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_nfacct_match_info *info = par->matchinfo;
nfnl_acct_put(info->nfacct);
}
static struct xt_match nfacct_mt_reg[] __read_mostly = {
{
.name = "nfacct",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = nfacct_mt_checkentry,
.match = nfacct_mt,
.destroy = nfacct_mt_destroy,
.matchsize = sizeof(struct xt_nfacct_match_info),
.usersize = offsetof(struct xt_nfacct_match_info, nfacct),
.me = THIS_MODULE,
},
{
.name = "nfacct",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = nfacct_mt_checkentry,
.match = nfacct_mt,
.destroy = nfacct_mt_destroy,
.matchsize = sizeof(struct xt_nfacct_match_info_v1),
.usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct),
.me = THIS_MODULE,
},
};
static int __init nfacct_mt_init(void)
{
return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
}
static void __exit nfacct_mt_exit(void)
{
xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
}
module_init(nfacct_mt_init);
module_exit(nfacct_mt_exit);
| linux-master | net/netfilter/xt_nfacct.c |
// SPDX-License-Identifier: GPL-2.0-only
/* PIPAPO: PIle PAcket POlicies: AVX2 packet lookup routines
*
* Copyright (c) 2019-2020 Red Hat GmbH
*
* Author: Stefano Brivio <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <uapi/linux/netfilter/nf_tables.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <asm/fpu/api.h>
#include "nft_set_pipapo_avx2.h"
#include "nft_set_pipapo.h"
#define NFT_PIPAPO_LONGS_PER_M256 (XSAVE_YMM_SIZE / BITS_PER_LONG)
/* Load from memory into YMM register with non-temporal hint ("stream load"),
* that is, don't fetch lines from memory into the cache. This avoids pushing
* precious packet data out of the cache hierarchy, and is appropriate when:
*
* - loading buckets from lookup tables, as they are not going to be used
* again before packets are entirely classified
*
* - loading the result bitmap from the previous field, as it's never used
* again
*/
#define NFT_PIPAPO_AVX2_LOAD(reg, loc) \
asm volatile("vmovntdqa %0, %%ymm" #reg : : "m" (loc))
/* Stream a single lookup table bucket into YMM register given lookup table,
* group index, value of packet bits, bucket size.
*/
#define NFT_PIPAPO_AVX2_BUCKET_LOAD4(reg, lt, group, v, bsize) \
NFT_PIPAPO_AVX2_LOAD(reg, \
lt[((group) * NFT_PIPAPO_BUCKETS(4) + \
(v)) * (bsize)])
#define NFT_PIPAPO_AVX2_BUCKET_LOAD8(reg, lt, group, v, bsize) \
NFT_PIPAPO_AVX2_LOAD(reg, \
lt[((group) * NFT_PIPAPO_BUCKETS(8) + \
(v)) * (bsize)])
/* Bitwise AND: the staple operation of this algorithm */
#define NFT_PIPAPO_AVX2_AND(dst, a, b) \
asm volatile("vpand %ymm" #a ", %ymm" #b ", %ymm" #dst)
/* Jump to label if @reg is zero */
#define NFT_PIPAPO_AVX2_NOMATCH_GOTO(reg, label) \
asm_volatile_goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \
"je %l[" #label "]" : : : : label)
/* Store 256 bits from YMM register into memory. Contrary to bucket load
* operation, we don't bypass the cache here, as stored matching results
* are always used shortly after.
*/
#define NFT_PIPAPO_AVX2_STORE(loc, reg) \
asm volatile("vmovdqa %%ymm" #reg ", %0" : "=m" (loc))
/* Zero out a complete YMM register, @reg */
#define NFT_PIPAPO_AVX2_ZERO(reg) \
asm volatile("vpxor %ymm" #reg ", %ymm" #reg ", %ymm" #reg)
/* Current working bitmap index, toggled between field matches */
static DEFINE_PER_CPU(bool, nft_pipapo_avx2_scratch_index);
/**
* nft_pipapo_avx2_prepare() - Prepare before main algorithm body
*
* This zeroes out ymm15, which is later used whenever we need to clear a
* memory location, by storing its content into memory.
*/
static void nft_pipapo_avx2_prepare(void)
{
NFT_PIPAPO_AVX2_ZERO(15);
}
/**
* nft_pipapo_avx2_fill() - Fill a bitmap region with ones
* @data: Base memory area
* @start: First bit to set
* @len: Count of bits to fill
*
 * This is essentially a version of bitmap_set(), as used e.g. by
* pipapo_refill(), tailored for the microarchitectures using it and better
* suited for the specific usage: it's very likely that we'll set a small number
* of bits, not crossing a word boundary, and correct branch prediction is
* critical here.
*
* This function doesn't actually use any AVX2 instruction.
*/
static void nft_pipapo_avx2_fill(unsigned long *data, int start, int len)
{
int offset = start % BITS_PER_LONG;
unsigned long mask;
data += start / BITS_PER_LONG;
if (likely(len == 1)) {
*data |= BIT(offset);
return;
}
if (likely(len < BITS_PER_LONG || offset)) {
if (likely(len + offset <= BITS_PER_LONG)) {
*data |= GENMASK(len - 1 + offset, offset);
return;
}
*data |= ~0UL << offset;
len -= BITS_PER_LONG - offset;
data++;
if (len <= BITS_PER_LONG) {
mask = ~0UL >> (BITS_PER_LONG - len);
*data |= mask;
return;
}
}
memset(data, 0xff, len / BITS_PER_BYTE);
data += len / BITS_PER_LONG;
len %= BITS_PER_LONG;
if (len)
*data |= ~0UL >> (BITS_PER_LONG - len);
}
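/*
 * Worked example (illustrative): on a 64-bit machine,
 * nft_pipapo_avx2_fill(data, 62, 4) takes the len < BITS_PER_LONG branch
 * with offset == 62: bits 62 and 63 of data[0] are set via ~0UL << 62,
 * len drops to 2, and bits 0 and 1 of data[1] are set with
 * ~0UL >> (BITS_PER_LONG - 2). The expected fast path, though, is
 * len == 1, which is a single BIT(offset) OR.
 */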
/**
* nft_pipapo_avx2_refill() - Scan bitmap, select mapping table item, set bits
* @offset: Start from given bitmap (equivalent to bucket) offset, in longs
* @map: Bitmap to be scanned for set bits
* @dst: Destination bitmap
* @mt: Mapping table containing bit set specifiers
* @last: Return index of first set bit, if this is the last field
*
 * This is an alternative implementation of pipapo_refill() suitable for use
* with AVX2 lookup routines: we know there are four words to be scanned, at
* a given offset inside the map, for each matching iteration.
*
* This function doesn't actually use any AVX2 instruction.
*
* Return: first set bit index if @last, index of first filled word otherwise.
*/
static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
unsigned long *dst,
union nft_pipapo_map_bucket *mt, bool last)
{
int ret = -1;
#define NFT_PIPAPO_AVX2_REFILL_ONE_WORD(x) \
do { \
while (map[(x)]) { \
int r = __builtin_ctzl(map[(x)]); \
int i = (offset + (x)) * BITS_PER_LONG + r; \
\
if (last) \
return i; \
\
nft_pipapo_avx2_fill(dst, mt[i].to, mt[i].n); \
\
if (ret == -1) \
ret = mt[i].to; \
\
map[(x)] &= ~(1UL << r); \
} \
} while (0)
NFT_PIPAPO_AVX2_REFILL_ONE_WORD(0);
NFT_PIPAPO_AVX2_REFILL_ONE_WORD(1);
NFT_PIPAPO_AVX2_REFILL_ONE_WORD(2);
NFT_PIPAPO_AVX2_REFILL_ONE_WORD(3);
#undef NFT_PIPAPO_AVX2_REFILL_ONE_WORD
return ret;
}
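/*
 * Illustrative note: with 64-bit longs, the four words above cover one
 * 256-bit AVX2 match result. If, say, map[1] == 0x5, the inner loop
 * visits rule indices (offset + 1) * 64 + 0 and (offset + 1) * 64 + 2,
 * fills the next field's bitmap from mt[] for each, and clears the
 * scanned bits, leaving map[] zeroed for reuse.
 */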
/**
* nft_pipapo_avx2_lookup_4b_2() - AVX2-based lookup for 2 four-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* Load buckets from lookup table corresponding to the values of each 4-bit
* group of packet bytes, and perform a bitwise intersection between them. If
* this is the first field in the set, simply AND the buckets together
* (equivalent to using an all-ones starting bitmap), use the provided starting
* bitmap otherwise. Then call nft_pipapo_avx2_refill() to generate the next
* working bitmap, @fill.
*
* This is used for 8-bit fields (i.e. protocol numbers).
*
* Out-of-order (and superscalar) execution is vital here, so it's critical to
* avoid false data dependencies. CPU and compiler could (mostly) take care of
* this on their own, but the operation ordering is explicitly given here with
* a likely execution order in mind, to highlight possible stalls. That's why
* a number of logically distinct operations (i.e. loading buckets, intersecting
* buckets) are interleaved.
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (first) {
NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 1, pg[1], bsize);
NFT_PIPAPO_AVX2_AND(4, 0, 1);
} else {
NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
NFT_PIPAPO_AVX2_LOAD(2, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 1, pg[1], bsize);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(2, nothing);
NFT_PIPAPO_AVX2_AND(3, 0, 1);
NFT_PIPAPO_AVX2_AND(4, 2, 3);
}
NFT_PIPAPO_AVX2_NOMATCH_GOTO(4, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
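/*
 * Illustrative note: the pg[] split above turns one packet byte into two
 * 4-bit bucket indices, e.g. pkt[0] == 0x1a yields pg[0] == 0x1 (high
 * nibble) and pg[1] == 0xa (low nibble), each selecting one bucket from
 * its lookup-table group before the bitwise AND intersects them.
 */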
/**
* nft_pipapo_avx2_lookup_4b_4() - AVX2-based lookup for 4 four-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 16-bit fields (i.e. ports).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (first) {
NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 1, pg[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 2, pg[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 3, pg[3], bsize);
NFT_PIPAPO_AVX2_AND(4, 0, 1);
NFT_PIPAPO_AVX2_AND(5, 2, 3);
NFT_PIPAPO_AVX2_AND(7, 4, 5);
} else {
NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 1, pg[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 2, pg[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 3, pg[3], bsize);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
NFT_PIPAPO_AVX2_AND(7, 4, 5);
/* Stall */
NFT_PIPAPO_AVX2_AND(7, 6, 7);
}
/* Stall */
NFT_PIPAPO_AVX2_NOMATCH_GOTO(7, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 7);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_4b_8() - AVX2-based lookup for 8 four-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 32-bit fields (i.e. IPv4 addresses).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
};
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (first) {
NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 1, pg[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 2, pg[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 3, pg[3], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 4, pg[4], bsize);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(6, lt, 5, pg[5], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 6, pg[6], bsize);
NFT_PIPAPO_AVX2_AND(8, 2, 3);
NFT_PIPAPO_AVX2_AND(9, 4, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 7, pg[7], bsize);
NFT_PIPAPO_AVX2_AND(11, 6, 7);
NFT_PIPAPO_AVX2_AND(12, 8, 9);
NFT_PIPAPO_AVX2_AND(13, 10, 11);
/* Stall */
NFT_PIPAPO_AVX2_AND(1, 12, 13);
} else {
NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 1, pg[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 2, pg[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 3, pg[3], bsize);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(6, lt, 4, pg[4], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 5, pg[5], bsize);
NFT_PIPAPO_AVX2_AND(8, 2, 3);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(9, lt, 6, pg[6], bsize);
NFT_PIPAPO_AVX2_AND(10, 4, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(11, lt, 7, pg[7], bsize);
NFT_PIPAPO_AVX2_AND(12, 6, 7);
NFT_PIPAPO_AVX2_AND(13, 8, 9);
NFT_PIPAPO_AVX2_AND(14, 10, 11);
/* Stall */
NFT_PIPAPO_AVX2_AND(1, 12, 13);
NFT_PIPAPO_AVX2_AND(1, 1, 14);
}
NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 1);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_4b_12() - AVX2-based lookup for 12 four-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 48-bit fields (i.e. MAC addresses/EUI-48).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
pkt[4] >> 4, pkt[4] & 0xf, pkt[5] >> 4, pkt[5] & 0xf,
};
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (!first)
NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 0, pg[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 1, pg[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 2, pg[2], bsize);
if (!first) {
NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nothing);
NFT_PIPAPO_AVX2_AND(1, 1, 0);
}
NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 3, pg[3], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(5, lt, 4, pg[4], bsize);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 5, pg[5], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(8, lt, 6, pg[6], bsize);
NFT_PIPAPO_AVX2_AND(9, 1, 4);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 7, pg[7], bsize);
NFT_PIPAPO_AVX2_AND(11, 5, 6);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(12, lt, 8, pg[8], bsize);
NFT_PIPAPO_AVX2_AND(13, 7, 8);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(14, lt, 9, pg[9], bsize);
NFT_PIPAPO_AVX2_AND(0, 9, 10);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 10, pg[10], bsize);
NFT_PIPAPO_AVX2_AND(2, 11, 12);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 11, pg[11], bsize);
NFT_PIPAPO_AVX2_AND(4, 13, 14);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
/* Stalls */
NFT_PIPAPO_AVX2_AND(7, 4, 5);
NFT_PIPAPO_AVX2_AND(8, 6, 7);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(8, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 8);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_4b_32() - AVX2-based lookup for 32 four-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 128-bit fields (i.e. IPv6 addresses).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
u8 pg[32] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
pkt[4] >> 4, pkt[4] & 0xf, pkt[5] >> 4, pkt[5] & 0xf,
pkt[6] >> 4, pkt[6] & 0xf, pkt[7] >> 4, pkt[7] & 0xf,
pkt[8] >> 4, pkt[8] & 0xf, pkt[9] >> 4, pkt[9] & 0xf,
pkt[10] >> 4, pkt[10] & 0xf, pkt[11] >> 4, pkt[11] & 0xf,
pkt[12] >> 4, pkt[12] & 0xf, pkt[13] >> 4, pkt[13] & 0xf,
pkt[14] >> 4, pkt[14] & 0xf, pkt[15] >> 4, pkt[15] & 0xf,
};
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (!first)
NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 0, pg[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 1, pg[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 2, pg[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 3, pg[3], bsize);
if (!first) {
NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nothing);
NFT_PIPAPO_AVX2_AND(1, 1, 0);
}
NFT_PIPAPO_AVX2_AND(5, 2, 3);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(6, lt, 4, pg[4], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 5, pg[5], bsize);
NFT_PIPAPO_AVX2_AND(8, 1, 4);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(9, lt, 6, pg[6], bsize);
NFT_PIPAPO_AVX2_AND(10, 5, 6);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(11, lt, 7, pg[7], bsize);
NFT_PIPAPO_AVX2_AND(12, 7, 8);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(13, lt, 8, pg[8], bsize);
NFT_PIPAPO_AVX2_AND(14, 9, 10);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 9, pg[9], bsize);
NFT_PIPAPO_AVX2_AND(1, 11, 12);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 10, pg[10], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 11, pg[11], bsize);
NFT_PIPAPO_AVX2_AND(4, 13, 14);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(5, lt, 12, pg[12], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(6, lt, 13, pg[13], bsize);
NFT_PIPAPO_AVX2_AND(7, 0, 1);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(8, lt, 14, pg[14], bsize);
NFT_PIPAPO_AVX2_AND(9, 2, 3);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 15, pg[15], bsize);
NFT_PIPAPO_AVX2_AND(11, 4, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(12, lt, 16, pg[16], bsize);
NFT_PIPAPO_AVX2_AND(13, 6, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(14, lt, 17, pg[17], bsize);
NFT_PIPAPO_AVX2_AND(0, 8, 9);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 18, pg[18], bsize);
NFT_PIPAPO_AVX2_AND(2, 10, 11);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 19, pg[19], bsize);
NFT_PIPAPO_AVX2_AND(4, 12, 13);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(5, lt, 20, pg[20], bsize);
NFT_PIPAPO_AVX2_AND(6, 14, 0);
NFT_PIPAPO_AVX2_AND(7, 1, 2);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(8, lt, 21, pg[21], bsize);
NFT_PIPAPO_AVX2_AND(9, 3, 4);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 22, pg[22], bsize);
NFT_PIPAPO_AVX2_AND(11, 5, 6);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(12, lt, 23, pg[23], bsize);
NFT_PIPAPO_AVX2_AND(13, 7, 8);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(14, lt, 24, pg[24], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 25, pg[25], bsize);
NFT_PIPAPO_AVX2_AND(1, 9, 10);
NFT_PIPAPO_AVX2_AND(2, 11, 12);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 26, pg[26], bsize);
NFT_PIPAPO_AVX2_AND(4, 13, 14);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(5, lt, 27, pg[27], bsize);
NFT_PIPAPO_AVX2_AND(6, 0, 1);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 28, pg[28], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(8, lt, 29, pg[29], bsize);
NFT_PIPAPO_AVX2_AND(9, 2, 3);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 30, pg[30], bsize);
NFT_PIPAPO_AVX2_AND(11, 4, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD4(12, lt, 31, pg[31], bsize);
NFT_PIPAPO_AVX2_AND(0, 6, 7);
NFT_PIPAPO_AVX2_AND(1, 8, 9);
NFT_PIPAPO_AVX2_AND(2, 10, 11);
NFT_PIPAPO_AVX2_AND(3, 12, 0);
/* Stalls */
NFT_PIPAPO_AVX2_AND(4, 1, 2);
NFT_PIPAPO_AVX2_AND(5, 3, 4);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(5, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 5);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_8b_1() - AVX2-based lookup for one eight-bit group
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 8-bit fields (i.e. protocol numbers).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (first) {
NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 0, pkt[0], bsize);
} else {
NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
NFT_PIPAPO_AVX2_AND(2, 0, 1);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
}
NFT_PIPAPO_AVX2_NOMATCH_GOTO(2, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 2);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_8b_2() - AVX2-based lookup for 2 eight-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 16-bit fields (i.e. ports).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (first) {
NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 1, pkt[1], bsize);
NFT_PIPAPO_AVX2_AND(4, 0, 1);
} else {
NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 0, pkt[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 1, pkt[1], bsize);
/* Stall */
NFT_PIPAPO_AVX2_AND(3, 0, 1);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nothing);
NFT_PIPAPO_AVX2_AND(4, 3, 2);
}
/* Stall */
NFT_PIPAPO_AVX2_NOMATCH_GOTO(4, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_8b_4() - AVX2-based lookup for 4 eight-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 32-bit fields (i.e. IPv4 addresses).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (first) {
NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 1, pkt[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 2, pkt[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 3, pkt[3], bsize);
/* Stall */
NFT_PIPAPO_AVX2_AND(4, 0, 1);
NFT_PIPAPO_AVX2_AND(5, 2, 3);
NFT_PIPAPO_AVX2_AND(0, 4, 5);
} else {
NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 1, pkt[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 2, pkt[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 3, pkt[3], bsize);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
/* Stall */
NFT_PIPAPO_AVX2_AND(7, 4, 5);
NFT_PIPAPO_AVX2_AND(0, 6, 7);
}
NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 0);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_8b_6() - AVX2-based lookup for 6 eight-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 48-bit fields (i.e. MAC addresses/EUI-48).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (first) {
NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 1, pkt[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 2, pkt[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 3, pkt[3], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 4, pkt[4], bsize);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 5, pkt[5], bsize);
NFT_PIPAPO_AVX2_AND(7, 2, 3);
/* Stall */
NFT_PIPAPO_AVX2_AND(0, 4, 5);
NFT_PIPAPO_AVX2_AND(1, 6, 7);
NFT_PIPAPO_AVX2_AND(4, 0, 1);
} else {
NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 1, pkt[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 2, pkt[2], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 3, pkt[3], bsize);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 4, pkt[4], bsize);
NFT_PIPAPO_AVX2_AND(0, 4, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 5, pkt[5], bsize);
NFT_PIPAPO_AVX2_AND(2, 6, 7);
/* Stall */
NFT_PIPAPO_AVX2_AND(3, 0, 1);
NFT_PIPAPO_AVX2_AND(4, 2, 3);
}
NFT_PIPAPO_AVX2_NOMATCH_GOTO(4, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_8b_16() - AVX2-based lookup for 16 eight-bit groups
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
* See nft_pipapo_avx2_lookup_4b_2().
*
* This is used for 128-bit fields (i.e. IPv6 addresses).
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
lt += offset * NFT_PIPAPO_LONGS_PER_M256;
for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
if (!first)
NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 0, pkt[0], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 1, pkt[1], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 2, pkt[2], bsize);
if (!first) {
NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nothing);
NFT_PIPAPO_AVX2_AND(1, 1, 0);
}
NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 3, pkt[3], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 4, pkt[4], bsize);
NFT_PIPAPO_AVX2_AND(6, 1, 2);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 5, pkt[5], bsize);
NFT_PIPAPO_AVX2_AND(0, 3, 4);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 6, pkt[6], bsize);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 7, pkt[7], bsize);
NFT_PIPAPO_AVX2_AND(3, 5, 6);
NFT_PIPAPO_AVX2_AND(4, 0, 1);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 8, pkt[8], bsize);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 9, pkt[9], bsize);
NFT_PIPAPO_AVX2_AND(0, 4, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 10, pkt[10], bsize);
NFT_PIPAPO_AVX2_AND(2, 6, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 11, pkt[11], bsize);
NFT_PIPAPO_AVX2_AND(4, 0, 1);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 12, pkt[12], bsize);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 13, pkt[13], bsize);
NFT_PIPAPO_AVX2_AND(0, 4, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 14, pkt[14], bsize);
NFT_PIPAPO_AVX2_AND(2, 6, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 15, pkt[15], bsize);
NFT_PIPAPO_AVX2_AND(4, 0, 1);
/* Stall */
NFT_PIPAPO_AVX2_AND(5, 2, 3);
NFT_PIPAPO_AVX2_AND(6, 4, 5);
NFT_PIPAPO_AVX2_NOMATCH_GOTO(6, nomatch);
NFT_PIPAPO_AVX2_STORE(map[i_ul], 6);
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
if (last)
return b;
if (unlikely(ret == -1))
ret = b / XSAVE_YMM_SIZE;
continue;
nomatch:
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
nothing:
;
}
return ret;
}
/**
* nft_pipapo_avx2_lookup_slow() - Fallback function for uncommon field sizes
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
* @offset: Ignore buckets before the given index, no bits are filled there
* @pkt: Packet data, pointer to input nftables register
* @first: If this is the first field, don't source previous result
* @last: Last field: stop at the first match and return bit index
*
 * This function should never be called in practice; it is provided in case the
 * field size doesn't match any of the known data types. The matching rate is
 * substantially lower than with the AVX2 routines.
*
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
unsigned long bsize = f->bsize;
int i, ret = -1, b;
if (first)
memset(map, 0xff, bsize * sizeof(*map));
for (i = offset; i < bsize; i++) {
if (f->bb == 8)
pipapo_and_field_buckets_8bit(f, map, pkt);
else
pipapo_and_field_buckets_4bit(f, map, pkt);
NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
b = pipapo_refill(map, bsize, f->rules, fill, f->mt, last);
if (last)
return b;
if (ret == -1)
ret = b / XSAVE_YMM_SIZE;
}
return ret;
}
/**
* nft_pipapo_avx2_estimate() - Set size, space and lookup complexity
* @desc: Set description, element count and field description used
* @features: Flags: NFT_SET_INTERVAL needs to be there
* @est: Storage for estimation data
*
* Return: true if set is compatible and AVX2 available, false otherwise.
*/
bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
if (!(features & NFT_SET_INTERVAL) ||
desc->field_count < NFT_PIPAPO_MIN_FIELDS)
return false;
if (!boot_cpu_has(X86_FEATURE_AVX2) || !boot_cpu_has(X86_FEATURE_AVX))
return false;
est->size = pipapo_estimate_size(desc);
if (!est->size)
return false;
est->lookup = NFT_SET_CLASS_O_LOG_N;
est->space = NFT_SET_CLASS_O_N;
return true;
}
/**
* nft_pipapo_avx2_lookup() - Lookup function for AVX2 implementation
* @net: Network namespace
* @set: nftables API set representation
* @key: nftables API element representation containing key data
* @ext: nftables API extension pointer, filled with matching reference
*
* For more details, see DOC: Theory of Operation in nft_set_pipapo.c.
*
* This implementation exploits the repetitive characteristic of the algorithm
* to provide a fast, vectorised version using the AVX2 SIMD instruction set.
*
* Return: true on match, false otherwise.
*/
bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
struct nft_pipapo *priv = nft_set_priv(set);
unsigned long *res, *fill, *scratch;
u8 genmask = nft_genmask_cur(net);
const u8 *rp = (const u8 *)key;
struct nft_pipapo_match *m;
struct nft_pipapo_field *f;
bool map_index;
int i, ret = 0;
if (unlikely(!irq_fpu_usable()))
return nft_pipapo_lookup(net, set, key, ext);
m = rcu_dereference(priv->match);
/* This also protects access to all data related to scratch maps.
*
* Note that we don't need a valid MXCSR state for any of the
* operations we use here, so pass 0 as mask and spare a LDMXCSR
* instruction.
*/
kernel_fpu_begin_mask(0);
scratch = *raw_cpu_ptr(m->scratch_aligned);
if (unlikely(!scratch)) {
kernel_fpu_end();
return false;
}
map_index = raw_cpu_read(nft_pipapo_avx2_scratch_index);
res = scratch + (map_index ? m->bsize_max : 0);
fill = scratch + (map_index ? 0 : m->bsize_max);
/* Starting map doesn't need to be set for this implementation */
nft_pipapo_avx2_prepare();
next_match:
nft_pipapo_for_each_field(f, i, m) {
bool last = i == m->field_count - 1, first = !i;
#define NFT_SET_PIPAPO_AVX2_LOOKUP(b, n) \
(ret = nft_pipapo_avx2_lookup_##b##b_##n(res, fill, f, \
ret, rp, \
first, last))
if (likely(f->bb == 8)) {
if (f->groups == 1) {
NFT_SET_PIPAPO_AVX2_LOOKUP(8, 1);
} else if (f->groups == 2) {
NFT_SET_PIPAPO_AVX2_LOOKUP(8, 2);
} else if (f->groups == 4) {
NFT_SET_PIPAPO_AVX2_LOOKUP(8, 4);
} else if (f->groups == 6) {
NFT_SET_PIPAPO_AVX2_LOOKUP(8, 6);
} else if (f->groups == 16) {
NFT_SET_PIPAPO_AVX2_LOOKUP(8, 16);
} else {
ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
ret, rp,
first, last);
}
} else {
if (f->groups == 2) {
NFT_SET_PIPAPO_AVX2_LOOKUP(4, 2);
} else if (f->groups == 4) {
NFT_SET_PIPAPO_AVX2_LOOKUP(4, 4);
} else if (f->groups == 8) {
NFT_SET_PIPAPO_AVX2_LOOKUP(4, 8);
} else if (f->groups == 12) {
NFT_SET_PIPAPO_AVX2_LOOKUP(4, 12);
} else if (f->groups == 32) {
NFT_SET_PIPAPO_AVX2_LOOKUP(4, 32);
} else {
ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
ret, rp,
first, last);
}
}
NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
#undef NFT_SET_PIPAPO_AVX2_LOOKUP
if (ret < 0)
goto out;
if (last) {
*ext = &f->mt[ret].e->ext;
if (unlikely(nft_set_elem_expired(*ext) ||
!nft_set_elem_active(*ext, genmask))) {
ret = 0;
goto next_match;
}
goto out;
}
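		/* The fill map of this field becomes the result map for the
		 * next one; pipapo_refill() already cleared the old result
		 * map while scanning it.
		 */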
swap(res, fill);
rp += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
out:
if (i % 2)
raw_cpu_write(nft_pipapo_avx2_scratch_index, !map_index);
kernel_fpu_end();
return ret >= 0;
}
| linux-master | net/netfilter/nft_set_pipapo_avx2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
struct nft_counter {
s64 bytes;
s64 packets;
};
struct nft_counter_percpu_priv {
struct nft_counter __percpu *counter;
};
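/* Each counter instance is per-CPU, so the packet path never touches shared
 * cachelines or atomics. The per-CPU seqcount lets a reader on another CPU
 * fetch a consistent bytes/packets pair: writers bump the sequence around
 * each update and readers retry on a race (see nft_counter_fetch() below).
 * This matters in particular on 32-bit hosts, where a 64-bit load could
 * otherwise tear.
 */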
static DEFINE_PER_CPU(seqcount_t, nft_counter_seq);
static inline void nft_counter_do_eval(struct nft_counter_percpu_priv *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_counter *this_cpu;
seqcount_t *myseq;
local_bh_disable();
this_cpu = this_cpu_ptr(priv->counter);
myseq = this_cpu_ptr(&nft_counter_seq);
write_seqcount_begin(myseq);
this_cpu->bytes += pkt->skb->len;
this_cpu->packets++;
write_seqcount_end(myseq);
local_bh_enable();
}
static inline void nft_counter_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_counter_percpu_priv *priv = nft_obj_data(obj);
nft_counter_do_eval(priv, regs, pkt);
}
static int nft_counter_do_init(const struct nlattr * const tb[],
struct nft_counter_percpu_priv *priv)
{
struct nft_counter __percpu *cpu_stats;
struct nft_counter *this_cpu;
cpu_stats = alloc_percpu_gfp(struct nft_counter, GFP_KERNEL_ACCOUNT);
if (cpu_stats == NULL)
return -ENOMEM;
preempt_disable();
this_cpu = this_cpu_ptr(cpu_stats);
if (tb[NFTA_COUNTER_PACKETS]) {
this_cpu->packets =
be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
}
if (tb[NFTA_COUNTER_BYTES]) {
this_cpu->bytes =
be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
}
preempt_enable();
priv->counter = cpu_stats;
return 0;
}
static int nft_counter_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_counter_percpu_priv *priv = nft_obj_data(obj);
return nft_counter_do_init(tb, priv);
}
static void nft_counter_do_destroy(struct nft_counter_percpu_priv *priv)
{
free_percpu(priv->counter);
}
static void nft_counter_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_counter_percpu_priv *priv = nft_obj_data(obj);
nft_counter_do_destroy(priv);
}
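/* Reset works by subtracting the snapshot just reported from the local CPU's
 * counter, instead of zeroing all CPUs, so packets counted while the dump was
 * in flight are not lost. The local counter may go transiently negative,
 * which is why struct nft_counter uses s64 fields.
 */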
static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
struct nft_counter *total)
{
struct nft_counter *this_cpu;
local_bh_disable();
this_cpu = this_cpu_ptr(priv->counter);
this_cpu->packets -= total->packets;
this_cpu->bytes -= total->bytes;
local_bh_enable();
}
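/* Sum the per-CPU counters. The seqcount retry loop guarantees that each
 * CPU's bytes/packets pair is read consistently with respect to local
 * writers.
 */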
static void nft_counter_fetch(struct nft_counter_percpu_priv *priv,
struct nft_counter *total)
{
struct nft_counter *this_cpu;
const seqcount_t *myseq;
u64 bytes, packets;
unsigned int seq;
int cpu;
memset(total, 0, sizeof(*total));
for_each_possible_cpu(cpu) {
myseq = per_cpu_ptr(&nft_counter_seq, cpu);
this_cpu = per_cpu_ptr(priv->counter, cpu);
do {
seq = read_seqcount_begin(myseq);
bytes = this_cpu->bytes;
packets = this_cpu->packets;
} while (read_seqcount_retry(myseq, seq));
total->bytes += bytes;
total->packets += packets;
}
}
static int nft_counter_do_dump(struct sk_buff *skb,
struct nft_counter_percpu_priv *priv,
bool reset)
{
struct nft_counter total;
nft_counter_fetch(priv, &total);
if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
NFTA_COUNTER_PAD) ||
nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets),
NFTA_COUNTER_PAD))
goto nla_put_failure;
if (reset)
nft_counter_reset(priv, &total);
return 0;
nla_put_failure:
return -1;
}
static int nft_counter_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
struct nft_counter_percpu_priv *priv = nft_obj_data(obj);
return nft_counter_do_dump(skb, priv, reset);
}
static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
[NFTA_COUNTER_PACKETS] = { .type = NLA_U64 },
[NFTA_COUNTER_BYTES] = { .type = NLA_U64 },
};
struct nft_object_type nft_counter_obj_type;
static const struct nft_object_ops nft_counter_obj_ops = {
.type = &nft_counter_obj_type,
.size = sizeof(struct nft_counter_percpu_priv),
.eval = nft_counter_obj_eval,
.init = nft_counter_obj_init,
.destroy = nft_counter_obj_destroy,
.dump = nft_counter_obj_dump,
};
struct nft_object_type nft_counter_obj_type __read_mostly = {
.type = NFT_OBJECT_COUNTER,
.ops = &nft_counter_obj_ops,
.maxattr = NFTA_COUNTER_MAX,
.policy = nft_counter_policy,
.owner = THIS_MODULE,
};
void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
nft_counter_do_eval(priv, regs, pkt);
}
static int nft_counter_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
return nft_counter_do_dump(skb, priv, reset);
}
static int nft_counter_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
return nft_counter_do_init(tb, priv);
}
static void nft_counter_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
nft_counter_do_destroy(priv);
}
static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
struct nft_counter __percpu *cpu_stats;
struct nft_counter *this_cpu;
struct nft_counter total;
nft_counter_fetch(priv, &total);
cpu_stats = alloc_percpu_gfp(struct nft_counter, GFP_ATOMIC);
if (cpu_stats == NULL)
return -ENOMEM;
preempt_disable();
this_cpu = this_cpu_ptr(cpu_stats);
this_cpu->packets = total.packets;
this_cpu->bytes = total.bytes;
preempt_enable();
priv_clone->counter = cpu_stats;
return 0;
}
static int nft_counter_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
/* No specific offload action is needed, but report success. */
return 0;
}
static void nft_counter_offload_stats(struct nft_expr *expr,
const struct flow_stats *stats)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
struct nft_counter *this_cpu;
seqcount_t *myseq;
preempt_disable();
this_cpu = this_cpu_ptr(priv->counter);
myseq = this_cpu_ptr(&nft_counter_seq);
write_seqcount_begin(myseq);
this_cpu->packets += stats->pkts;
this_cpu->bytes += stats->bytes;
write_seqcount_end(myseq);
preempt_enable();
}
void nft_counter_init_seqcount(void)
{
int cpu;
for_each_possible_cpu(cpu)
seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu));
}
struct nft_expr_type nft_counter_type;
static const struct nft_expr_ops nft_counter_ops = {
.type = &nft_counter_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
.eval = nft_counter_eval,
.init = nft_counter_init,
.destroy = nft_counter_destroy,
.destroy_clone = nft_counter_destroy,
.dump = nft_counter_dump,
.clone = nft_counter_clone,
.reduce = NFT_REDUCE_READONLY,
.offload = nft_counter_offload,
.offload_stats = nft_counter_offload_stats,
};
struct nft_expr_type nft_counter_type __read_mostly = {
.name = "counter",
.ops = &nft_counter_ops,
.policy = nft_counter_policy,
.maxattr = NFTA_COUNTER_MAX,
.flags = NFT_EXPR_STATEFUL,
.owner = THIS_MODULE,
};
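/* Illustrative userspace usage (nft CLI syntax, shown for context only):
 *
 *	nft add rule inet filter input counter
 *	nft add counter inet filter mycnt
 *
 * The anonymous form in a rule instantiates nft_counter_ops above; the named
 * object form goes through nft_counter_obj_ops (NFT_OBJECT_COUNTER).
 */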
| linux-master | net/netfilter/nft_counter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
struct nft_byteorder {
u8 sreg;
u8 dreg;
enum nft_byteorder_ops op:8;
u8 len;
u8 size;
};
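/* Registers are arrays of 32-bit words; priv->len is the total byte length
 * to convert and priv->size the width of each element (2, 4 or 8 bytes).
 * For example, converting a 16-byte value with 4-byte elements runs the
 * 32-bit loop below four times.
 */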
void nft_byteorder_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_byteorder *priv = nft_expr_priv(expr);
u32 *src = ®s->data[priv->sreg];
u32 *dst = ®s->data[priv->dreg];
u16 *s16, *d16;
unsigned int i;
s16 = (void *)src;
d16 = (void *)dst;
switch (priv->size) {
case 8: {
u64 src64;
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 8; i++) {
src64 = nft_reg_load64(&src[i]);
nft_reg_store64(&dst[i],
be64_to_cpu((__force __be64)src64));
}
break;
case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 8; i++) {
src64 = (__force __u64)
cpu_to_be64(nft_reg_load64(&src[i]));
nft_reg_store64(&dst[i], src64);
}
break;
}
break;
}
case 4:
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 4; i++)
dst[i] = ntohl((__force __be32)src[i]);
break;
case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 4; i++)
dst[i] = (__force __u32)htonl(src[i]);
break;
}
break;
case 2:
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 2; i++)
d16[i] = ntohs((__force __be16)s16[i]);
break;
case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 2; i++)
d16[i] = (__force __u16)htons(s16[i]);
break;
}
break;
}
}
static const struct nla_policy nft_byteorder_policy[NFTA_BYTEORDER_MAX + 1] = {
[NFTA_BYTEORDER_SREG] = { .type = NLA_U32 },
[NFTA_BYTEORDER_DREG] = { .type = NLA_U32 },
[NFTA_BYTEORDER_OP] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_BYTEORDER_LEN] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_BYTEORDER_SIZE] = NLA_POLICY_MAX(NLA_BE32, 255),
};
static int nft_byteorder_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_byteorder *priv = nft_expr_priv(expr);
u32 size, len;
int err;
if (tb[NFTA_BYTEORDER_SREG] == NULL ||
tb[NFTA_BYTEORDER_DREG] == NULL ||
tb[NFTA_BYTEORDER_LEN] == NULL ||
tb[NFTA_BYTEORDER_SIZE] == NULL ||
tb[NFTA_BYTEORDER_OP] == NULL)
return -EINVAL;
priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
case NFT_BYTEORDER_HTON:
break;
default:
return -EINVAL;
}
err = nft_parse_u32_check(tb[NFTA_BYTEORDER_SIZE], U8_MAX, &size);
if (err < 0)
return err;
priv->size = size;
switch (priv->size) {
case 2:
case 4:
case 8:
break;
default:
return -EINVAL;
}
err = nft_parse_u32_check(tb[NFTA_BYTEORDER_LEN], U8_MAX, &len);
if (err < 0)
return err;
priv->len = len;
err = nft_parse_register_load(tb[NFTA_BYTEORDER_SREG], &priv->sreg,
priv->len);
if (err < 0)
return err;
return nft_parse_register_store(ctx, tb[NFTA_BYTEORDER_DREG],
&priv->dreg, NULL, NFT_DATA_VALUE,
priv->len);
}
static int nft_byteorder_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_byteorder *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_BYTEORDER_SREG, priv->sreg))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_BYTEORDER_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_BYTEORDER_OP, htonl(priv->op)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_BYTEORDER_LEN, htonl(priv->len)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_BYTEORDER_SIZE, htonl(priv->size)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static bool nft_byteorder_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
struct nft_byteorder *priv = nft_expr_priv(expr);
nft_reg_track_cancel(track, priv->dreg, priv->len);
return false;
}
static const struct nft_expr_ops nft_byteorder_ops = {
.type = &nft_byteorder_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_byteorder)),
.eval = nft_byteorder_eval,
.init = nft_byteorder_init,
.dump = nft_byteorder_dump,
.reduce = nft_byteorder_reduce,
};
struct nft_expr_type nft_byteorder_type __read_mostly = {
.name = "byteorder",
.ops = &nft_byteorder_ops,
.policy = nft_byteorder_policy,
.maxattr = NFTA_BYTEORDER_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_byteorder.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (C) 2010 Pablo Neira Ayuso <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
static bool nf_ct_tstamp __read_mostly;
module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
void nf_conntrack_tstamp_pernet_init(struct net *net)
{
net->ct.sysctl_tstamp = nf_ct_tstamp;
}
| linux-master | net/netfilter/nf_conntrack_timestamp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
*/
#include <linux/types.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/dccp.h>
#include <linux/sctp.h>
#include <net/sctp/checksum.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>
#include <linux/ipv6.h>
#include <linux/netfilter_ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static void nf_csum_update(struct sk_buff *skb,
unsigned int iphdroff, __sum16 *check,
const struct nf_conntrack_tuple *t,
enum nf_nat_manip_type maniptype);
static void
__udp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, struct udphdr *hdr,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype, bool do_csum)
{
__be16 *portptr, newport;
if (maniptype == NF_NAT_MANIP_SRC) {
/* Get rid of src port */
newport = tuple->src.u.udp.port;
portptr = &hdr->source;
} else {
/* Get rid of dst port */
newport = tuple->dst.u.udp.port;
portptr = &hdr->dest;
}
if (do_csum) {
nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
false);
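		/* Per RFC 768, an all-zero UDP checksum means "no checksum
		 * computed", so a recomputed value of zero must be folded to
		 * CSUM_MANGLED_0 (0xffff) instead.
		 */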
if (!hdr->check)
hdr->check = CSUM_MANGLED_0;
}
*portptr = newport;
}
static bool udp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
struct udphdr *hdr;
if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct udphdr *)(skb->data + hdroff);
__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);
return true;
}
static bool udplite_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
struct udphdr *hdr;
if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct udphdr *)(skb->data + hdroff);
__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, true);
#endif
return true;
}
static bool
sctp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct sctphdr *hdr;
int hdrsize = 8;
/* This could be an inner header returned in an ICMP packet; in such
* cases we cannot update the checksum field since it is outside
* of the 8 bytes of transport layer headers we are guaranteed.
*/
if (skb->len >= hdroff + sizeof(*hdr))
hdrsize = sizeof(*hdr);
if (skb_ensure_writable(skb, hdroff + hdrsize))
return false;
hdr = (struct sctphdr *)(skb->data + hdroff);
if (maniptype == NF_NAT_MANIP_SRC) {
/* Get rid of src port */
hdr->source = tuple->src.u.sctp.port;
} else {
/* Get rid of dst port */
hdr->dest = tuple->dst.u.sctp.port;
}
if (hdrsize < sizeof(*hdr))
return true;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
hdr->checksum = sctp_compute_cksum(skb, hdroff);
skb->ip_summed = CHECKSUM_NONE;
}
#endif
return true;
}
static bool
tcp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
struct tcphdr *hdr;
__be16 *portptr, newport, oldport;
int hdrsize = 8; /* TCP connection tracking guarantees this much */
/* this could be an inner header returned in an ICMP packet; in such
cases we cannot update the checksum field since it is outside of
the 8 bytes of transport layer headers we are guaranteed */
if (skb->len >= hdroff + sizeof(struct tcphdr))
hdrsize = sizeof(struct tcphdr);
if (skb_ensure_writable(skb, hdroff + hdrsize))
return false;
hdr = (struct tcphdr *)(skb->data + hdroff);
if (maniptype == NF_NAT_MANIP_SRC) {
/* Get rid of src port */
newport = tuple->src.u.tcp.port;
portptr = &hdr->source;
} else {
/* Get rid of dst port */
newport = tuple->dst.u.tcp.port;
portptr = &hdr->dest;
}
oldport = *portptr;
*portptr = newport;
if (hdrsize < sizeof(*hdr))
return true;
nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
return true;
}
static bool
dccp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
#ifdef CONFIG_NF_CT_PROTO_DCCP
struct dccp_hdr *hdr;
__be16 *portptr, oldport, newport;
int hdrsize = 8; /* DCCP connection tracking guarantees this much */
if (skb->len >= hdroff + sizeof(struct dccp_hdr))
hdrsize = sizeof(struct dccp_hdr);
if (skb_ensure_writable(skb, hdroff + hdrsize))
return false;
hdr = (struct dccp_hdr *)(skb->data + hdroff);
if (maniptype == NF_NAT_MANIP_SRC) {
newport = tuple->src.u.dccp.port;
portptr = &hdr->dccph_sport;
} else {
newport = tuple->dst.u.dccp.port;
portptr = &hdr->dccph_dport;
}
oldport = *portptr;
*portptr = newport;
if (hdrsize < sizeof(*hdr))
return true;
nf_csum_update(skb, iphdroff, &hdr->dccph_checksum, tuple, maniptype);
inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
false);
#endif
return true;
}
static bool
icmp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
struct icmphdr *hdr;
if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct icmphdr *)(skb->data + hdroff);
switch (hdr->type) {
case ICMP_ECHO:
case ICMP_ECHOREPLY:
case ICMP_TIMESTAMP:
case ICMP_TIMESTAMPREPLY:
case ICMP_INFO_REQUEST:
case ICMP_INFO_REPLY:
case ICMP_ADDRESS:
case ICMP_ADDRESSREPLY:
break;
default:
return true;
}
inet_proto_csum_replace2(&hdr->checksum, skb,
hdr->un.echo.id, tuple->src.u.icmp.id, false);
hdr->un.echo.id = tuple->src.u.icmp.id;
return true;
}
static bool
icmpv6_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
struct icmp6hdr *hdr;
if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct icmp6hdr *)(skb->data + hdroff);
nf_csum_update(skb, iphdroff, &hdr->icmp6_cksum, tuple, maniptype);
if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST ||
hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
hdr->icmp6_identifier,
tuple->src.u.icmp.id, false);
hdr->icmp6_identifier = tuple->src.u.icmp.id;
}
return true;
}
/* manipulate a GRE packet according to maniptype */
static bool
gre_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
const struct gre_base_hdr *greh;
struct pptp_gre_header *pgreh;
/* pgreh includes two optional 32-bit fields which are not required
* to be there. That's where the magic '8' comes from */
if (skb_ensure_writable(skb, hdroff + sizeof(*pgreh) - 8))
return false;
greh = (void *)skb->data + hdroff;
pgreh = (struct pptp_gre_header *)greh;
/* we only have destination manip of a packet, since 'source key'
* is not present in the packet itself */
if (maniptype != NF_NAT_MANIP_DST)
return true;
switch (greh->flags & GRE_VERSION) {
case GRE_VERSION_0:
/* We do not currently NAT any GREv0 packets.
* Try to behave like "nf_nat_proto_unknown" */
break;
case GRE_VERSION_1:
pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
pgreh->call_id = tuple->dst.u.gre.key;
break;
default:
pr_debug("can't nat unknown GRE version\n");
return false;
}
#endif
return true;
}
static bool l4proto_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
switch (tuple->dst.protonum) {
case IPPROTO_TCP:
return tcp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_UDP:
return udp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_UDPLITE:
return udplite_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_SCTP:
return sctp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_ICMP:
return icmp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_ICMPV6:
return icmpv6_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_DCCP:
return dccp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_GRE:
return gre_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
}
/* If we don't know protocol -- no error, pass it unmodified. */
return true;
}
static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *target,
enum nf_nat_manip_type maniptype)
{
struct iphdr *iph;
unsigned int hdroff;
if (skb_ensure_writable(skb, iphdroff + sizeof(*iph)))
return false;
iph = (void *)skb->data + iphdroff;
hdroff = iphdroff + iph->ihl * 4;
if (!l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype))
return false;
iph = (void *)skb->data + iphdroff;
if (maniptype == NF_NAT_MANIP_SRC) {
csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
iph->saddr = target->src.u3.ip;
} else {
csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
iph->daddr = target->dst.u3.ip;
}
return true;
}
static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *target,
enum nf_nat_manip_type maniptype)
{
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *ipv6h;
__be16 frag_off;
int hdroff;
u8 nexthdr;
if (skb_ensure_writable(skb, iphdroff + sizeof(*ipv6h)))
return false;
ipv6h = (void *)skb->data + iphdroff;
nexthdr = ipv6h->nexthdr;
hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
&nexthdr, &frag_off);
if (hdroff < 0)
goto manip_addr;
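	/* Only the first fragment (offset 0) carries the transport header;
	 * for later fragments just rewrite the IPv6 addresses.
	 */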
if ((frag_off & htons(~0x7)) == 0 &&
!l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype))
return false;
/* must reload, offset might have changed */
ipv6h = (void *)skb->data + iphdroff;
manip_addr:
if (maniptype == NF_NAT_MANIP_SRC)
ipv6h->saddr = target->src.u3.in6;
else
ipv6h->daddr = target->dst.u3.in6;
#endif
return true;
}
unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
enum nf_nat_manip_type mtype,
enum ip_conntrack_dir dir)
{
struct nf_conntrack_tuple target;
/* We are aiming to look like inverse of other direction. */
nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
switch (target.src.l3num) {
case NFPROTO_IPV6:
if (nf_nat_ipv6_manip_pkt(skb, 0, &target, mtype))
return NF_ACCEPT;
break;
case NFPROTO_IPV4:
if (nf_nat_ipv4_manip_pkt(skb, 0, &target, mtype))
return NF_ACCEPT;
break;
default:
WARN_ON_ONCE(1);
break;
}
return NF_DROP;
}
static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
unsigned int iphdroff, __sum16 *check,
const struct nf_conntrack_tuple *t,
enum nf_nat_manip_type maniptype)
{
struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
__be32 oldip, newip;
if (maniptype == NF_NAT_MANIP_SRC) {
oldip = iph->saddr;
newip = t->src.u3.ip;
} else {
oldip = iph->daddr;
newip = t->dst.u3.ip;
}
inet_proto_csum_replace4(check, skb, oldip, newip, true);
}
static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
unsigned int iphdroff, __sum16 *check,
const struct nf_conntrack_tuple *t,
enum nf_nat_manip_type maniptype)
{
#if IS_ENABLED(CONFIG_IPV6)
const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
const struct in6_addr *oldip, *newip;
if (maniptype == NF_NAT_MANIP_SRC) {
oldip = &ipv6h->saddr;
newip = &t->src.u3.in6;
} else {
oldip = &ipv6h->daddr;
newip = &t->dst.u3.in6;
}
inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
newip->s6_addr32, true);
#endif
}
static void nf_csum_update(struct sk_buff *skb,
unsigned int iphdroff, __sum16 *check,
const struct nf_conntrack_tuple *t,
enum nf_nat_manip_type maniptype)
{
switch (t->src.l3num) {
case NFPROTO_IPV4:
nf_nat_ipv4_csum_update(skb, iphdroff, check, t, maniptype);
return;
case NFPROTO_IPV6:
nf_nat_ipv6_csum_update(skb, iphdroff, check, t, maniptype);
return;
}
}
static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
u8 proto, void *data, __sum16 *check,
int datalen, int oldlen)
{
if (skb->ip_summed != CHECKSUM_PARTIAL) {
const struct iphdr *iph = ip_hdr(skb);
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
ip_hdrlen(skb);
skb->csum_offset = (void *)check - data;
*check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
proto, 0);
} else {
inet_proto_csum_replace2(check, skb,
htons(oldlen), htons(datalen), true);
}
}
#if IS_ENABLED(CONFIG_IPV6)
static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
u8 proto, void *data, __sum16 *check,
int datalen, int oldlen)
{
if (skb->ip_summed != CHECKSUM_PARTIAL) {
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
(data - (void *)skb->data);
skb->csum_offset = (void *)check - data;
*check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
datalen, proto, 0);
} else {
inet_proto_csum_replace2(check, skb,
htons(oldlen), htons(datalen), true);
}
}
#endif
void nf_nat_csum_recalc(struct sk_buff *skb,
u8 nfproto, u8 proto, void *data, __sum16 *check,
int datalen, int oldlen)
{
switch (nfproto) {
case NFPROTO_IPV4:
nf_nat_ipv4_csum_recalc(skb, proto, data, check,
datalen, oldlen);
return;
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
nf_nat_ipv6_csum_recalc(skb, proto, data, check,
datalen, oldlen);
return;
#endif
}
WARN_ON_ONCE(1);
}
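/* ICMP errors embed the IP header and first 8 bytes of the packet that
 * triggered them. To NAT the error sensibly, the embedded (inner) packet is
 * translated with the inverse manipulation and direction of the outer one,
 * then the inner ICMP checksum and the outer header are fixed up.
 */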
int nf_nat_icmp_reply_translation(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum)
{
struct {
struct icmphdr icmp;
struct iphdr ip;
} *inside;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
unsigned int hdrlen = ip_hdrlen(skb);
struct nf_conntrack_tuple target;
unsigned long statusbit;
WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
return 0;
if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
return 0;
inside = (void *)skb->data + hdrlen;
if (inside->icmp.type == ICMP_REDIRECT) {
if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
return 0;
if (ct->status & IPS_NAT_MASK)
return 0;
}
if (manip == NF_NAT_MANIP_SRC)
statusbit = IPS_SRC_NAT;
else
statusbit = IPS_DST_NAT;
/* Invert if this is reply direction */
if (dir == IP_CT_DIR_REPLY)
statusbit ^= IPS_NAT_MASK;
if (!(ct->status & statusbit))
return 1;
if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
&ct->tuplehash[!dir].tuple, !manip))
return 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
/* Reloading "inside" here since manip_pkt may reallocate */
inside = (void *)skb->data + hdrlen;
inside->icmp.checksum = 0;
inside->icmp.checksum =
csum_fold(skb_checksum(skb, hdrlen,
skb->len - hdrlen, 0));
}
/* Change outer to look like the reply to an incoming packet */
nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
target.dst.protonum = IPPROTO_ICMP;
if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
static unsigned int
nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
return NF_ACCEPT;
if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) {
if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
state->hook))
return NF_DROP;
else
return NF_ACCEPT;
}
}
return nf_nat_inet_fn(priv, skb, state);
}
static unsigned int
nf_nat_ipv4_pre_routing(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
unsigned int ret;
__be32 daddr = ip_hdr(skb)->daddr;
ret = nf_nat_ipv4_fn(priv, skb, state);
if (ret == NF_ACCEPT && daddr != ip_hdr(skb)->daddr)
skb_dst_drop(skb);
return ret;
}
#ifdef CONFIG_XFRM
static int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
struct sock *sk = skb->sk;
struct dst_entry *dst;
unsigned int hh_len;
struct flowi fl;
int err;
err = xfrm_decode_session(skb, &fl, family);
if (err < 0)
return err;
dst = skb_dst(skb);
if (dst->xfrm)
dst = ((struct xfrm_dst *)dst)->route;
if (!dst_hold_safe(dst))
return -EHOSTUNREACH;
if (sk && !net_eq(net, sock_net(sk)))
sk = NULL;
dst = xfrm_lookup(net, dst, &fl, sk, 0);
if (IS_ERR(dst))
return PTR_ERR(dst);
skb_dst_drop(skb);
skb_dst_set(skb, dst);
/* Change in oif may mean change in hh_len. */
hh_len = skb_dst(skb)->dev->hard_header_len;
if (skb_headroom(skb) < hh_len &&
pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
return -ENOMEM;
return 0;
}
#endif
static unsigned int
nf_nat_ipv4_local_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
__be32 saddr = ip_hdr(skb)->saddr;
struct sock *sk = skb->sk;
unsigned int ret;
ret = nf_nat_ipv4_fn(priv, skb, state);
if (ret == NF_ACCEPT && sk && saddr != ip_hdr(skb)->saddr &&
!inet_sk_transparent(sk))
skb_orphan(skb); /* TCP edemux obtained wrong socket */
return ret;
}
static unsigned int
nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
#ifdef CONFIG_XFRM
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
int err;
#endif
unsigned int ret;
ret = nf_nat_ipv4_fn(priv, skb, state);
#ifdef CONFIG_XFRM
if (ret != NF_ACCEPT)
return ret;
if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return ret;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
if (ct->tuplehash[dir].tuple.src.u3.ip !=
ct->tuplehash[!dir].tuple.dst.u3.ip ||
(ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
ct->tuplehash[dir].tuple.src.u.all !=
ct->tuplehash[!dir].tuple.dst.u.all)) {
err = nf_xfrm_me_harder(state->net, skb, AF_INET);
if (err < 0)
ret = NF_DROP_ERR(err);
}
}
#endif
return ret;
}
static unsigned int
nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned int ret;
int err;
ret = nf_nat_ipv4_fn(priv, skb, state);
if (ret != NF_ACCEPT)
return ret;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
if (ct->tuplehash[dir].tuple.dst.u3.ip !=
ct->tuplehash[!dir].tuple.src.u3.ip) {
err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
if (err < 0)
ret = NF_DROP_ERR(err);
}
#ifdef CONFIG_XFRM
else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
ct->tuplehash[dir].tuple.dst.u.all !=
ct->tuplehash[!dir].tuple.src.u.all) {
err = nf_xfrm_me_harder(state->net, skb, AF_INET);
if (err < 0)
ret = NF_DROP_ERR(err);
}
#endif
}
return ret;
}
static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
/* Before packet filtering, change destination */
{
.hook = nf_nat_ipv4_pre_routing,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_NAT_DST,
},
/* After packet filtering, change source */
{
.hook = nf_nat_ipv4_out,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SRC,
},
/* Before packet filtering, change destination */
{
.hook = nf_nat_ipv4_local_fn,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST,
},
/* After packet filtering, change source */
{
.hook = nf_nat_ipv4_local_in,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
},
};
int nf_nat_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops)
{
return nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv4_ops,
ARRAY_SIZE(nf_nat_ipv4_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_register_fn);
void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
{
nf_nat_unregister_fn(net, ops->pf, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_unregister_fn);
#if IS_ENABLED(CONFIG_IPV6)
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum,
unsigned int hdrlen)
{
struct {
struct icmp6hdr icmp6;
struct ipv6hdr ip6;
} *inside;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
struct nf_conntrack_tuple target;
unsigned long statusbit;
WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
return 0;
if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
return 0;
inside = (void *)skb->data + hdrlen;
if (inside->icmp6.icmp6_type == NDISC_REDIRECT) {
if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
return 0;
if (ct->status & IPS_NAT_MASK)
return 0;
}
if (manip == NF_NAT_MANIP_SRC)
statusbit = IPS_SRC_NAT;
else
statusbit = IPS_DST_NAT;
/* Invert if this is reply direction */
if (dir == IP_CT_DIR_REPLY)
statusbit ^= IPS_NAT_MASK;
if (!(ct->status & statusbit))
return 1;
if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
&ct->tuplehash[!dir].tuple, !manip))
return 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
inside = (void *)skb->data + hdrlen;
inside->icmp6.icmp6_cksum = 0;
inside->icmp6.icmp6_cksum =
csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
skb->len - hdrlen, IPPROTO_ICMPV6,
skb_checksum(skb, hdrlen,
skb->len - hdrlen, 0));
}
nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
target.dst.protonum = IPPROTO_ICMPV6;
if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
static unsigned int
nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
__be16 frag_off;
int hdrlen;
u8 nexthdr;
ct = nf_ct_get(skb, &ctinfo);
/* Can't track? It's not due to stress, or conntrack would
* have dropped it. Hence it's the user's responsibility to
* packet filter it out, or implement conntrack/NAT for that
* protocol. 8) --RR
*/
if (!ct)
return NF_ACCEPT;
if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) {
nexthdr = ipv6_hdr(skb)->nexthdr;
hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
&nexthdr, &frag_off);
if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
state->hook,
hdrlen))
return NF_DROP;
else
return NF_ACCEPT;
}
}
return nf_nat_inet_fn(priv, skb, state);
}
static unsigned int
nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
unsigned int ret;
struct in6_addr daddr = ipv6_hdr(skb)->daddr;
ret = nf_nat_ipv6_fn(priv, skb, state);
if (ret != NF_DROP && ret != NF_STOLEN &&
ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
skb_dst_drop(skb);
return ret;
}
static unsigned int
nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
#ifdef CONFIG_XFRM
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
int err;
#endif
unsigned int ret;
ret = nf_nat_ipv6_fn(priv, skb, state);
#ifdef CONFIG_XFRM
if (ret != NF_ACCEPT)
return ret;
if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return ret;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3) ||
(ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
ct->tuplehash[dir].tuple.src.u.all !=
ct->tuplehash[!dir].tuple.dst.u.all)) {
err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
if (err < 0)
ret = NF_DROP_ERR(err);
}
}
#endif
return ret;
}
static unsigned int
nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned int ret;
int err;
ret = nf_nat_ipv6_fn(priv, skb, state);
if (ret != NF_ACCEPT)
return ret;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
&ct->tuplehash[!dir].tuple.src.u3)) {
err = nf_ip6_route_me_harder(state->net, state->sk, skb);
if (err < 0)
ret = NF_DROP_ERR(err);
}
#ifdef CONFIG_XFRM
else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
ct->tuplehash[dir].tuple.dst.u.all !=
ct->tuplehash[!dir].tuple.src.u.all) {
err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
if (err < 0)
ret = NF_DROP_ERR(err);
}
#endif
}
return ret;
}
static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
/* Before packet filtering, change destination */
{
.hook = nf_nat_ipv6_in,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_NAT_DST,
},
/* After packet filtering, change source */
{
.hook = nf_nat_ipv6_out,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_NAT_SRC,
},
/* Before packet filtering, change destination */
{
.hook = nf_nat_ipv6_local_fn,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST,
},
/* After packet filtering, change source */
{
.hook = nf_nat_ipv6_fn,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC,
},
};
int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops)
{
return nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv6_ops,
ARRAY_SIZE(nf_nat_ipv6_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_ipv6_register_fn);
void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
{
nf_nat_unregister_fn(net, ops->pf, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_ipv6_unregister_fn);
#endif /* CONFIG_IPV6 */
#if defined(CONFIG_NF_TABLES_INET) && IS_ENABLED(CONFIG_NFT_NAT)
int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops)
{
int ret;
if (WARN_ON_ONCE(ops->pf != NFPROTO_INET))
return -EINVAL;
ret = nf_nat_register_fn(net, NFPROTO_IPV6, ops, nf_nat_ipv6_ops,
ARRAY_SIZE(nf_nat_ipv6_ops));
if (ret)
return ret;
ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops,
ARRAY_SIZE(nf_nat_ipv4_ops));
if (ret)
nf_nat_unregister_fn(net, NFPROTO_IPV6, ops,
ARRAY_SIZE(nf_nat_ipv6_ops));
return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn);
void nf_nat_inet_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
{
nf_nat_unregister_fn(net, NFPROTO_IPV4, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
nf_nat_unregister_fn(net, NFPROTO_IPV6, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_inet_unregister_fn);
#endif /* NFT INET NAT */
| linux-master | net/netfilter/nf_nat_proto.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Generic part shared by ipv4 and ipv6 backends.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <linux/in.h>
#include <net/xfrm.h>
static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = {
[NFTA_XFRM_KEY] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_XFRM_DIR] = { .type = NLA_U8 },
[NFTA_XFRM_SPNUM] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_XFRM_DREG] = { .type = NLA_U32 },
};
struct nft_xfrm {
enum nft_xfrm_keys key:8;
u8 dreg;
u8 dir;
u8 spnum;
u8 len;
};
static int nft_xfrm_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_xfrm *priv = nft_expr_priv(expr);
unsigned int len = 0;
u32 spnum = 0;
u8 dir;
if (!tb[NFTA_XFRM_KEY] || !tb[NFTA_XFRM_DIR] || !tb[NFTA_XFRM_DREG])
return -EINVAL;
switch (ctx->family) {
case NFPROTO_IPV4:
case NFPROTO_IPV6:
case NFPROTO_INET:
break;
default:
return -EOPNOTSUPP;
}
priv->key = ntohl(nla_get_be32(tb[NFTA_XFRM_KEY]));
switch (priv->key) {
case NFT_XFRM_KEY_REQID:
case NFT_XFRM_KEY_SPI:
len = sizeof(u32);
break;
case NFT_XFRM_KEY_DADDR_IP4:
case NFT_XFRM_KEY_SADDR_IP4:
len = sizeof(struct in_addr);
break;
case NFT_XFRM_KEY_DADDR_IP6:
case NFT_XFRM_KEY_SADDR_IP6:
len = sizeof(struct in6_addr);
break;
default:
return -EINVAL;
}
dir = nla_get_u8(tb[NFTA_XFRM_DIR]);
switch (dir) {
case XFRM_POLICY_IN:
case XFRM_POLICY_OUT:
priv->dir = dir;
break;
default:
return -EINVAL;
}
if (tb[NFTA_XFRM_SPNUM])
spnum = ntohl(nla_get_be32(tb[NFTA_XFRM_SPNUM]));
if (spnum >= XFRM_MAX_DEPTH)
return -ERANGE;
priv->spnum = spnum;
priv->len = len;
return nft_parse_register_store(ctx, tb[NFTA_XFRM_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, len);
}
/* Return true if key asks for daddr/saddr and current
* state does have a valid address (BEET, TUNNEL).
*/
static bool xfrm_state_addr_ok(enum nft_xfrm_keys k, u8 family, u8 mode)
{
switch (k) {
case NFT_XFRM_KEY_DADDR_IP4:
case NFT_XFRM_KEY_SADDR_IP4:
if (family == NFPROTO_IPV4)
break;
return false;
case NFT_XFRM_KEY_DADDR_IP6:
case NFT_XFRM_KEY_SADDR_IP6:
if (family == NFPROTO_IPV6)
break;
return false;
default:
return true;
}
return mode == XFRM_MODE_BEET || mode == XFRM_MODE_TUNNEL;
}
static void nft_xfrm_state_get_key(const struct nft_xfrm *priv,
struct nft_regs *regs,
const struct xfrm_state *state)
{
u32 *dest = ®s->data[priv->dreg];
if (!xfrm_state_addr_ok(priv->key,
state->props.family,
state->props.mode)) {
regs->verdict.code = NFT_BREAK;
return;
}
switch (priv->key) {
case NFT_XFRM_KEY_UNSPEC:
case __NFT_XFRM_KEY_MAX:
WARN_ON_ONCE(1);
break;
case NFT_XFRM_KEY_DADDR_IP4:
*dest = (__force __u32)state->id.daddr.a4;
return;
case NFT_XFRM_KEY_DADDR_IP6:
memcpy(dest, &state->id.daddr.in6, sizeof(struct in6_addr));
return;
case NFT_XFRM_KEY_SADDR_IP4:
*dest = (__force __u32)state->props.saddr.a4;
return;
case NFT_XFRM_KEY_SADDR_IP6:
memcpy(dest, &state->props.saddr.in6, sizeof(struct in6_addr));
return;
case NFT_XFRM_KEY_REQID:
*dest = state->props.reqid;
return;
case NFT_XFRM_KEY_SPI:
*dest = (__force __u32)state->id.spi;
return;
}
regs->verdict.code = NFT_BREAK;
}
static void nft_xfrm_get_eval_in(const struct nft_xfrm *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct sec_path *sp = skb_sec_path(pkt->skb);
const struct xfrm_state *state;
if (sp == NULL || sp->len <= priv->spnum) {
regs->verdict.code = NFT_BREAK;
return;
}
state = sp->xvec[priv->spnum];
nft_xfrm_state_get_key(priv, regs, state);
}
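/* On output the transformations have not been applied yet; walk the xfrm
 * bundle hanging off the route (each xfrm_dst links to its child) to find
 * the spnum-th state.
 */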
static void nft_xfrm_get_eval_out(const struct nft_xfrm *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct dst_entry *dst = skb_dst(pkt->skb);
int i;
for (i = 0; dst && dst->xfrm;
dst = ((const struct xfrm_dst *)dst)->child, i++) {
if (i < priv->spnum)
continue;
nft_xfrm_state_get_key(priv, regs, dst->xfrm);
return;
}
regs->verdict.code = NFT_BREAK;
}
static void nft_xfrm_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_xfrm *priv = nft_expr_priv(expr);
switch (priv->dir) {
case XFRM_POLICY_IN:
nft_xfrm_get_eval_in(priv, regs, pkt);
break;
case XFRM_POLICY_OUT:
nft_xfrm_get_eval_out(priv, regs, pkt);
break;
default:
WARN_ON_ONCE(1);
regs->verdict.code = NFT_BREAK;
break;
}
}
static int nft_xfrm_get_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_xfrm *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_XFRM_DREG, priv->dreg))
return -1;
if (nla_put_be32(skb, NFTA_XFRM_KEY, htonl(priv->key)))
return -1;
if (nla_put_u8(skb, NFTA_XFRM_DIR, priv->dir))
return -1;
if (nla_put_be32(skb, NFTA_XFRM_SPNUM, htonl(priv->spnum)))
return -1;
return 0;
}
static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nft_data **data)
{
const struct nft_xfrm *priv = nft_expr_priv(expr);
unsigned int hooks;
switch (priv->dir) {
case XFRM_POLICY_IN:
hooks = (1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_PRE_ROUTING);
break;
case XFRM_POLICY_OUT:
hooks = (1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING);
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return nft_chain_validate_hooks(ctx->chain, hooks);
}
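/* Register tracking: if a previous xfrm expression already loaded the same
 * key/dir/spnum into the same destination register, the redundant load can
 * be elided.
 */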
static bool nft_xfrm_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_xfrm *priv = nft_expr_priv(expr);
const struct nft_xfrm *xfrm;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
xfrm = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->key != xfrm->key ||
priv->dreg != xfrm->dreg ||
priv->dir != xfrm->dir ||
priv->spnum != xfrm->spnum) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return nft_expr_reduce_bitwise(track, expr);
}
static struct nft_expr_type nft_xfrm_type;
static const struct nft_expr_ops nft_xfrm_get_ops = {
.type = &nft_xfrm_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_xfrm)),
.eval = nft_xfrm_get_eval,
.init = nft_xfrm_get_init,
.dump = nft_xfrm_get_dump,
.validate = nft_xfrm_validate,
.reduce = nft_xfrm_reduce,
};
static struct nft_expr_type nft_xfrm_type __read_mostly = {
.name = "xfrm",
.ops = &nft_xfrm_get_ops,
.policy = nft_xfrm_policy,
.maxattr = NFTA_XFRM_MAX,
.owner = THIS_MODULE,
};
static int __init nft_xfrm_module_init(void)
{
return nft_register_expr(&nft_xfrm_type);
}
static void __exit nft_xfrm_module_exit(void)
{
nft_unregister_expr(&nft_xfrm_type);
}
module_init(nft_xfrm_module_init);
module_exit(nft_xfrm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("nf_tables: xfrm/IPSec matching");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_AUTHOR("Máté Eckl <[email protected]>");
MODULE_ALIAS_NFT_EXPR("xfrm");
| linux-master | net/netfilter/nft_xfrm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (C) 2012 by Pablo Neira Ayuso <[email protected]>
* (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static unsigned int nfct_timeout_id __read_mostly;
struct ctnl_timeout {
struct list_head head;
struct list_head free_head;
struct rcu_head rcu_head;
refcount_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
/* must be at the end */
struct nf_ct_timeout timeout;
};
struct nfct_timeout_pernet {
struct list_head nfct_timeout_list;
struct list_head nfct_timeout_freelist;
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tuning");
static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
[CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING,
.len = CTNL_TIMEOUT_NAME_MAX - 1},
[CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
[CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
[CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
};
static struct nfct_timeout_pernet *nfct_timeout_pernet(struct net *net)
{
return net_generic(net, nfct_timeout_id);
}
static int
ctnl_timeout_parse_policy(void *timeout,
const struct nf_conntrack_l4proto *l4proto,
struct net *net, const struct nlattr *attr)
{
struct nlattr **tb;
int ret = 0;
tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
GFP_KERNEL);
if (!tb)
return -ENOMEM;
ret = nla_parse_nested_deprecated(tb,
l4proto->ctnl_timeout.nlattr_max,
attr,
l4proto->ctnl_timeout.nla_policy,
NULL);
if (ret < 0)
goto err;
ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeout);
err:
kfree(tb);
return ret;
}
static int cttimeout_new_timeout(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(info->net);
__u16 l3num;
__u8 l4num;
const struct nf_conntrack_l4proto *l4proto;
struct ctnl_timeout *timeout, *matching = NULL;
char *name;
int ret;
if (!cda[CTA_TIMEOUT_NAME] ||
!cda[CTA_TIMEOUT_L3PROTO] ||
!cda[CTA_TIMEOUT_L4PROTO] ||
!cda[CTA_TIMEOUT_DATA])
return -EINVAL;
name = nla_data(cda[CTA_TIMEOUT_NAME]);
l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
list_for_each_entry(timeout, &pernet->nfct_timeout_list, head) {
if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
if (info->nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
matching = timeout;
break;
}
if (matching) {
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
/* You cannot replace one timeout policy by another of
* different kind, sorry.
*/
if (matching->timeout.l3num != l3num ||
matching->timeout.l4proto->l4proto != l4num)
return -EINVAL;
return ctnl_timeout_parse_policy(&matching->timeout.data,
matching->timeout.l4proto,
info->net,
cda[CTA_TIMEOUT_DATA]);
}
return -EBUSY;
}
l4proto = nf_ct_l4proto_find(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
goto err_proto_put;
}
timeout = kzalloc(sizeof(struct ctnl_timeout) +
l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
if (timeout == NULL) {
ret = -ENOMEM;
goto err_proto_put;
}
ret = ctnl_timeout_parse_policy(&timeout->timeout.data, l4proto,
info->net, cda[CTA_TIMEOUT_DATA]);
if (ret < 0)
goto err;
strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
timeout->timeout.l3num = l3num;
timeout->timeout.l4proto = l4proto;
refcount_set(&timeout->refcnt, 1);
__module_get(THIS_MODULE);
list_add_tail_rcu(&timeout->head, &pernet->nfct_timeout_list);
return 0;
err:
kfree(timeout);
err_proto_put:
return ret;
}
static int
ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int event, struct ctnl_timeout *timeout)
{
struct nlmsghdr *nlh;
unsigned int flags = portid ? NLM_F_MULTI : 0;
const struct nf_conntrack_l4proto *l4proto = timeout->timeout.l4proto;
struct nlattr *nest_parms;
int ret;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
nla_put_be16(skb, CTA_TIMEOUT_L3PROTO,
htons(timeout->timeout.l3num)) ||
nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto) ||
nla_put_be32(skb, CTA_TIMEOUT_USE,
htonl(refcount_read(&timeout->refcnt))))
goto nla_put_failure;
nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA);
if (!nest_parms)
goto nla_put_failure;
ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->timeout.data);
if (ret < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int
ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nfct_timeout_pernet *pernet;
struct net *net = sock_net(skb->sk);
struct ctnl_timeout *cur, *last;
if (cb->args[2])
return 0;
last = (struct ctnl_timeout *)cb->args[1];
if (cb->args[1])
cb->args[1] = 0;
rcu_read_lock();
pernet = nfct_timeout_pernet(net);
list_for_each_entry_rcu(cur, &pernet->nfct_timeout_list, head) {
if (last) {
if (cur != last)
continue;
last = NULL;
}
if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
cb->args[1] = (unsigned long)cur;
break;
}
}
if (!cb->args[1])
cb->args[2] = 1;
rcu_read_unlock();
return skb->len;
}
static int cttimeout_get_timeout(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(info->net);
int ret = -ENOENT;
char *name;
struct ctnl_timeout *cur;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ctnl_timeout_dump,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
if (!cda[CTA_TIMEOUT_NAME])
return -EINVAL;
name = nla_data(cda[CTA_TIMEOUT_NAME]);
list_for_each_entry(cur, &pernet->nfct_timeout_list, head) {
struct sk_buff *skb2;
if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
ret = -ENOMEM;
break;
}
ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_NEW, cur);
if (ret <= 0) {
kfree_skb(skb2);
break;
}
ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
break;
}
return ret;
}
/* try to delete object, fail if it is still in use. */
static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
{
int ret = 0;
/* We want to avoid races with ctnl_timeout_put. So only when the
* current refcnt is 1, we decrease it to 0.
*/
if (refcount_dec_if_one(&timeout->refcnt)) {
/* We are protected by nfnl mutex. */
list_del_rcu(&timeout->head);
nf_ct_untimeout(net, &timeout->timeout);
kfree_rcu(timeout, rcu_head);
} else {
ret = -EBUSY;
}
return ret;
}
static int cttimeout_del_timeout(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(info->net);
struct ctnl_timeout *cur, *tmp;
int ret = -ENOENT;
char *name;
if (!cda[CTA_TIMEOUT_NAME]) {
list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_list,
head)
ctnl_timeout_try_del(info->net, cur);
return 0;
}
name = nla_data(cda[CTA_TIMEOUT_NAME]);
list_for_each_entry(cur, &pernet->nfct_timeout_list, head) {
if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
ret = ctnl_timeout_try_del(info->net, cur);
if (ret < 0)
return ret;
break;
}
return ret;
}
static int cttimeout_default_set(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
const struct nf_conntrack_l4proto *l4proto;
__u8 l4num;
int ret;
if (!cda[CTA_TIMEOUT_L3PROTO] ||
!cda[CTA_TIMEOUT_L4PROTO] ||
!cda[CTA_TIMEOUT_DATA])
return -EINVAL;
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
l4proto = nf_ct_l4proto_find(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
goto err;
}
ret = ctnl_timeout_parse_policy(NULL, l4proto, info->net,
cda[CTA_TIMEOUT_DATA]);
if (ret < 0)
goto err;
return 0;
err:
return ret;
}
static int
cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
u32 seq, u32 type, int event, u16 l3num,
const struct nf_conntrack_l4proto *l4proto,
const unsigned int *timeouts)
{
struct nlmsghdr *nlh;
unsigned int flags = portid ? NLM_F_MULTI : 0;
struct nlattr *nest_parms;
int ret;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
NFNETLINK_V0, 0);
if (!nlh)
goto nlmsg_failure;
if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l3num)) ||
nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
goto nla_put_failure;
nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA);
if (!nest_parms)
goto nla_put_failure;
ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
if (ret < 0)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int cttimeout_default_get(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
const struct nf_conntrack_l4proto *l4proto;
unsigned int *timeouts = NULL;
struct sk_buff *skb2;
__u16 l3num;
__u8 l4num;
int ret;
if (!cda[CTA_TIMEOUT_L3PROTO] || !cda[CTA_TIMEOUT_L4PROTO])
return -EINVAL;
l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
l4proto = nf_ct_l4proto_find(l4num);
if (l4proto->l4proto != l4num)
return -EOPNOTSUPP;
switch (l4proto->l4proto) {
case IPPROTO_ICMP:
timeouts = &nf_icmp_pernet(info->net)->timeout;
break;
case IPPROTO_TCP:
timeouts = nf_tcp_pernet(info->net)->timeouts;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
timeouts = nf_udp_pernet(info->net)->timeouts;
break;
case IPPROTO_DCCP:
#ifdef CONFIG_NF_CT_PROTO_DCCP
timeouts = nf_dccp_pernet(info->net)->dccp_timeout;
#endif
break;
case IPPROTO_ICMPV6:
timeouts = &nf_icmpv6_pernet(info->net)->timeout;
break;
case IPPROTO_SCTP:
#ifdef CONFIG_NF_CT_PROTO_SCTP
timeouts = nf_sctp_pernet(info->net)->timeouts;
#endif
break;
case IPPROTO_GRE:
#ifdef CONFIG_NF_CT_PROTO_GRE
timeouts = nf_gre_pernet(info->net)->timeouts;
#endif
break;
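/* 255 matches nf_conntrack_l4proto_generic, the catch-all tracker */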
case 255:
timeouts = &nf_generic_pernet(info->net)->timeout;
break;
default:
WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
break;
}
if (!timeouts)
return -EOPNOTSUPP;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb2)
return -ENOMEM;
ret = cttimeout_default_fill_info(info->net, skb2,
NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
l3num, l4proto, timeouts);
if (ret <= 0) {
kfree_skb(skb2);
return -ENOMEM;
}
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
const char *name)
{
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
struct ctnl_timeout *timeout, *matching = NULL;
list_for_each_entry_rcu(timeout, &pernet->nfct_timeout_list, head) {
if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
continue;
if (!refcount_inc_not_zero(&timeout->refcnt))
goto err;
matching = timeout;
break;
}
err:
return matching ? &matching->timeout : NULL;
}
static void ctnl_timeout_put(struct nf_ct_timeout *t)
{
struct ctnl_timeout *timeout =
container_of(t, struct ctnl_timeout, timeout);
if (refcount_dec_and_test(&timeout->refcnt)) {
kfree_rcu(timeout, rcu_head);
module_put(THIS_MODULE);
}
}
static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
[IPCTNL_MSG_TIMEOUT_NEW] = {
.call = cttimeout_new_timeout,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy
},
[IPCTNL_MSG_TIMEOUT_GET] = {
.call = cttimeout_get_timeout,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy
},
[IPCTNL_MSG_TIMEOUT_DELETE] = {
.call = cttimeout_del_timeout,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy
},
[IPCTNL_MSG_TIMEOUT_DEFAULT_SET] = {
.call = cttimeout_default_set,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy
},
[IPCTNL_MSG_TIMEOUT_DEFAULT_GET] = {
.call = cttimeout_default_get,
.type = NFNL_CB_MUTEX,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy
},
};
static const struct nfnetlink_subsystem cttimeout_subsys = {
.name = "conntrack_timeout",
.subsys_id = NFNL_SUBSYS_CTNETLINK_TIMEOUT,
.cb_count = IPCTNL_MSG_TIMEOUT_MAX,
.cb = cttimeout_cb,
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_TIMEOUT);
static int __net_init cttimeout_net_init(struct net *net)
{
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
INIT_LIST_HEAD(&pernet->nfct_timeout_list);
INIT_LIST_HEAD(&pernet->nfct_timeout_freelist);
return 0;
}
static void __net_exit cttimeout_net_pre_exit(struct net *net)
{
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
struct ctnl_timeout *cur, *tmp;
list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_list, head) {
list_del_rcu(&cur->head);
list_add(&cur->free_head, &pernet->nfct_timeout_freelist);
}
/* core calls synchronize_rcu() after this */
}
static void __net_exit cttimeout_net_exit(struct net *net)
{
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
struct ctnl_timeout *cur, *tmp;
if (list_empty(&pernet->nfct_timeout_freelist))
return;
nf_ct_untimeout(net, NULL);
list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, free_head) {
list_del(&cur->free_head);
if (refcount_dec_and_test(&cur->refcnt))
kfree_rcu(cur, rcu_head);
}
}
static struct pernet_operations cttimeout_ops = {
.init = cttimeout_net_init,
.pre_exit = cttimeout_net_pre_exit,
.exit = cttimeout_net_exit,
.id = &nfct_timeout_id,
.size = sizeof(struct nfct_timeout_pernet),
};
static const struct nf_ct_timeout_hooks hooks = {
.timeout_find_get = ctnl_timeout_find_get,
.timeout_put = ctnl_timeout_put,
};
static int __init cttimeout_init(void)
{
int ret;
ret = register_pernet_subsys(&cttimeout_ops);
if (ret < 0)
return ret;
ret = nfnetlink_subsys_register(&cttimeout_subsys);
if (ret < 0) {
pr_err("cttimeout_init: cannot register cttimeout with "
"nfnetlink.\n");
goto err_out;
}
RCU_INIT_POINTER(nf_ct_timeout_hook, &hooks);
return 0;
err_out:
unregister_pernet_subsys(&cttimeout_ops);
return ret;
}
static int untimeout(struct nf_conn *ct, void *timeout)
{
struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext)
RCU_INIT_POINTER(timeout_ext->timeout, NULL);
return 0;
}
static void __exit cttimeout_exit(void)
{
nfnetlink_subsys_unregister(&cttimeout_subsys);
unregister_pernet_subsys(&cttimeout_ops);
RCU_INIT_POINTER(nf_ct_timeout_hook, NULL);
nf_ct_iterate_destroy(untimeout, NULL);
}
module_init(cttimeout_init);
module_exit(cttimeout_exit);
| linux-master | net/netfilter/nfnetlink_cttimeout.c |
/*
* Rusty Russell (C)2000 -- This code is GPL.
* Patrick McHardy (c) 2006-2012
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>
#include "nf_internals.h"
static const struct nf_queue_handler __rcu *nf_queue_handler;
/*
* Hook for nfnetlink_queue to register its queue handler.
* We do this so that most of the NFQUEUE code can be modular.
*
* Once the queue is registered it must reinject all packets it
* receives, no matter what.
*/
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
/* should never happen, we only have one queueing backend in kernel */
WARN_ON(rcu_access_pointer(nf_queue_handler));
rcu_assign_pointer(nf_queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
RCU_INIT_POINTER(nf_queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
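/*
 * Editor's sketch (illustrative only): the shape of the single backend that
 * registers through nf_register_queue_handler(). The callback names below
 * are hypothetical; the real implementation lives in
 * net/netfilter/nfnetlink_queue.c.
 */
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	return -ESRCH; /* pretend no userspace listener is bound */
}

static void example_hook_drop(struct net *net)
{
	/* flush any packets still queued for this netns */
}

static const struct nf_queue_handler example_qh = {
	.outfn		= example_outfn,
	.nf_hook_drop	= example_hook_drop,
};
/* registration: nf_register_queue_handler(&example_qh); */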
static void nf_queue_sock_put(struct sock *sk)
{
#ifdef CONFIG_INET
sock_gen_put(sk);
#else
sock_put(sk);
#endif
}
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
struct nf_hook_state *state = &entry->state;
/* Release those devices we held, or Alexey will kill me. */
dev_put(state->in);
dev_put(state->out);
if (state->sk)
nf_queue_sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
dev_put(entry->physin);
dev_put(entry->physout);
#endif
}
void nf_queue_entry_free(struct nf_queue_entry *entry)
{
nf_queue_entry_release_refs(entry);
kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);
static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
const struct sk_buff *skb = entry->skb;
struct nf_bridge_info *nf_bridge;
nf_bridge = nf_bridge_info_get(skb);
if (nf_bridge) {
entry->physin = nf_bridge_get_physindev(skb);
entry->physout = nf_bridge_get_physoutdev(skb);
} else {
entry->physin = NULL;
entry->physout = NULL;
}
#endif
}
/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
struct nf_hook_state *state = &entry->state;
if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
return false;
dev_hold(state->in);
dev_hold(state->out);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
dev_hold(entry->physin);
dev_hold(entry->physout);
#endif
return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
void nf_queue_nf_hook_drop(struct net *net)
{
const struct nf_queue_handler *qh;
rcu_read_lock();
qh = rcu_dereference(nf_queue_handler);
if (qh)
qh->nf_hook_drop(net);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
static void nf_ip_saveroute(const struct sk_buff *skb,
struct nf_queue_entry *entry)
{
struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
if (entry->state.hook == NF_INET_LOCAL_OUT) {
const struct iphdr *iph = ip_hdr(skb);
rt_info->tos = iph->tos;
rt_info->daddr = iph->daddr;
rt_info->saddr = iph->saddr;
rt_info->mark = skb->mark;
}
}
static void nf_ip6_saveroute(const struct sk_buff *skb,
struct nf_queue_entry *entry)
{
struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
if (entry->state.hook == NF_INET_LOCAL_OUT) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
rt_info->daddr = iph->daddr;
rt_info->saddr = iph->saddr;
rt_info->mark = skb->mark;
}
}
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
unsigned int index, unsigned int queuenum)
{
struct nf_queue_entry *entry = NULL;
const struct nf_queue_handler *qh;
unsigned int route_key_size;
int status;
/* QUEUE == DROP if no one is waiting, to be safe. */
qh = rcu_dereference(nf_queue_handler);
if (!qh)
return -ESRCH;
switch (state->pf) {
case AF_INET:
route_key_size = sizeof(struct ip_rt_info);
break;
case AF_INET6:
route_key_size = sizeof(struct ip6_rt_info);
break;
default:
route_key_size = 0;
break;
}
if (skb_sk_is_prefetched(skb)) {
struct sock *sk = skb->sk;
if (!sk_is_refcounted(sk)) {
if (!refcount_inc_not_zero(&sk->sk_refcnt))
return -ENOTCONN;
/* drop refcount on skb_orphan */
skb->destructor = sock_edemux;
}
}
entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
if (!entry)
return -ENOMEM;
if (skb_dst(skb) && !skb_dst_force(skb)) {
kfree(entry);
return -ENETDOWN;
}
*entry = (struct nf_queue_entry) {
.skb = skb,
.state = *state,
.hook_index = index,
.size = sizeof(*entry) + route_key_size,
};
__nf_queue_entry_init_physdevs(entry);
if (!nf_queue_entry_get_refs(entry)) {
kfree(entry);
return -ENOTCONN;
}
switch (entry->state.pf) {
case AF_INET:
nf_ip_saveroute(skb, entry);
break;
case AF_INET6:
nf_ip6_saveroute(skb, entry);
break;
}
status = qh->outfn(entry, queuenum);
if (status < 0) {
nf_queue_entry_free(entry);
return status;
}
return 0;
}
/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
unsigned int index, unsigned int verdict)
{
int ret;
ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
if (ret < 0) {
if (ret == -ESRCH &&
(verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
return 1;
kfree_skb(skb);
}
return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
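/*
 * Editor's sketch (standalone userspace): how a hook return value packs both
 * the NF_QUEUE verdict and the target queue number. The EX_ constants mirror
 * the uapi definitions; nf_queue() above recovers the queue id with
 * "verdict >> NF_VERDICT_QBITS".
 */
#include <stdio.h>

#define EX_NF_QUEUE		3
#define EX_NF_VERDICT_MASK	0x000000ff
#define EX_NF_VERDICT_QBITS	16
#define EX_NF_QUEUE_NR(x)	((((x) << 16) & 0xffff0000) | EX_NF_QUEUE)

int main(void)
{
	unsigned int verdict = EX_NF_QUEUE_NR(7);

	printf("verdict: %u, queue: %u\n",
	       verdict & EX_NF_VERDICT_MASK,
	       verdict >> EX_NF_VERDICT_QBITS);
	return 0;
}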
static unsigned int nf_iterate(struct sk_buff *skb,
struct nf_hook_state *state,
const struct nf_hook_entries *hooks,
unsigned int *index)
{
const struct nf_hook_entry *hook;
unsigned int verdict, i = *index;
while (i < hooks->num_hook_entries) {
hook = &hooks->hooks[i];
repeat:
verdict = nf_hook_entry_hookfn(hook, skb, state);
if (verdict != NF_ACCEPT) {
*index = i;
if (verdict != NF_REPEAT)
return verdict;
goto repeat;
}
i++;
}
*index = i;
return NF_ACCEPT;
}
static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
case NFPROTO_BRIDGE:
return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
case NFPROTO_IPV4:
return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
case NFPROTO_IPV6:
return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
default:
WARN_ON_ONCE(1);
return NULL;
}
return NULL;
}
/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
const struct nf_hook_entry *hook_entry;
const struct nf_hook_entries *hooks;
struct sk_buff *skb = entry->skb;
const struct net *net;
unsigned int i;
int err;
u8 pf;
net = entry->state.net;
pf = entry->state.pf;
hooks = nf_hook_entries_head(net, pf, entry->state.hook);
i = entry->hook_index;
if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
kfree_skb(skb);
nf_queue_entry_free(entry);
return;
}
hook_entry = &hooks->hooks[i];
/* Continue traversal iff userspace said ok... */
if (verdict == NF_REPEAT)
verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
if (verdict == NF_ACCEPT) {
if (nf_reroute(skb, entry) < 0)
verdict = NF_DROP;
}
if (verdict == NF_ACCEPT) {
next_hook:
++i;
verdict = nf_iterate(skb, &entry->state, hooks, &i);
}
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_STOP:
local_bh_disable();
entry->state.okfn(entry->state.net, entry->state.sk, skb);
local_bh_enable();
break;
case NF_QUEUE:
err = nf_queue(skb, &entry->state, i, verdict);
if (err == 1)
goto next_hook;
break;
case NF_STOLEN:
break;
default:
kfree_skb(skb);
}
nf_queue_entry_free(entry);
}
EXPORT_SYMBOL(nf_reinject);
| linux-master | net/netfilter/nf_queue.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>
struct nft_tunnel {
enum nft_tunnel_keys key:8;
u8 dreg;
enum nft_tunnel_mode mode:8;
u8 len;
};
static void nft_tunnel_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_tunnel *priv = nft_expr_priv(expr);
u32 *dest = &regs->data[priv->dreg];
struct ip_tunnel_info *tun_info;
tun_info = skb_tunnel_info(pkt->skb);
switch (priv->key) {
case NFT_TUNNEL_PATH:
if (!tun_info) {
nft_reg_store8(dest, false);
return;
}
if (priv->mode == NFT_TUNNEL_MODE_NONE ||
(priv->mode == NFT_TUNNEL_MODE_RX &&
!(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
(priv->mode == NFT_TUNNEL_MODE_TX &&
(tun_info->mode & IP_TUNNEL_INFO_TX)))
nft_reg_store8(dest, true);
else
nft_reg_store8(dest, false);
break;
case NFT_TUNNEL_ID:
if (!tun_info) {
regs->verdict.code = NFT_BREAK;
return;
}
if (priv->mode == NFT_TUNNEL_MODE_NONE ||
(priv->mode == NFT_TUNNEL_MODE_RX &&
!(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
(priv->mode == NFT_TUNNEL_MODE_TX &&
(tun_info->mode & IP_TUNNEL_INFO_TX)))
*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
else
regs->verdict.code = NFT_BREAK;
break;
default:
WARN_ON(1);
regs->verdict.code = NFT_BREAK;
}
}
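/*
 * Editor's sketch: the tunnel-mode predicate duplicated in both switch arms
 * above, pulled out for clarity. "tx" stands for the result of
 * (tun_info->mode & IP_TUNNEL_INFO_TX): RX-only keys match metadata
 * collected on receive, TX-only keys match metadata staged for transmit,
 * and NONE matches either direction.
 */
static bool ex_tunnel_mode_matches(enum nft_tunnel_mode mode, bool tx)
{
	return mode == NFT_TUNNEL_MODE_NONE ||
	       (mode == NFT_TUNNEL_MODE_RX && !tx) ||
	       (mode == NFT_TUNNEL_MODE_TX && tx);
}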
static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
[NFTA_TUNNEL_KEY] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
[NFTA_TUNNEL_MODE] = NLA_POLICY_MAX(NLA_BE32, 255),
};
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_tunnel *priv = nft_expr_priv(expr);
u32 len;
if (!tb[NFTA_TUNNEL_KEY] ||
!tb[NFTA_TUNNEL_DREG])
return -EINVAL;
priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
switch (priv->key) {
case NFT_TUNNEL_PATH:
len = sizeof(u8);
break;
case NFT_TUNNEL_ID:
len = sizeof(u32);
break;
default:
return -EOPNOTSUPP;
}
if (tb[NFTA_TUNNEL_MODE]) {
priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
if (priv->mode > NFT_TUNNEL_MODE_MAX)
return -EOPNOTSUPP;
} else {
priv->mode = NFT_TUNNEL_MODE_NONE;
}
priv->len = len;
return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, len);
}
static int nft_tunnel_get_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_tunnel *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_tunnel *priv = nft_expr_priv(expr);
const struct nft_tunnel *tunnel;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->key != tunnel->key ||
priv->dreg != tunnel->dreg ||
priv->mode != tunnel->mode) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return false;
}
static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
.type = &nft_tunnel_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
.eval = nft_tunnel_get_eval,
.init = nft_tunnel_get_init,
.dump = nft_tunnel_get_dump,
.reduce = nft_tunnel_get_reduce,
};
static struct nft_expr_type nft_tunnel_type __read_mostly = {
.name = "tunnel",
.family = NFPROTO_NETDEV,
.ops = &nft_tunnel_get_ops,
.policy = nft_tunnel_policy,
.maxattr = NFTA_TUNNEL_MAX,
.owner = THIS_MODULE,
};
struct nft_tunnel_opts {
union {
struct vxlan_metadata vxlan;
struct erspan_metadata erspan;
u8 data[IP_TUNNEL_OPTS_MAX];
} u;
u32 len;
__be16 flags;
};
struct nft_tunnel_obj {
struct metadata_dst *md;
struct nft_tunnel_opts opts;
};
static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
[NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
[NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
};
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
const struct nlattr *attr,
struct ip_tunnel_info *info)
{
struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
int err;
err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
nft_tunnel_ip_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_TUNNEL_KEY_IP_DST])
return -EINVAL;
if (tb[NFTA_TUNNEL_KEY_IP_SRC])
info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
if (tb[NFTA_TUNNEL_KEY_IP_DST])
info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);
return 0;
}
static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
[NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), },
[NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), },
[NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
};
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
const struct nlattr *attr,
struct ip_tunnel_info *info)
{
struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
int err;
err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
nft_tunnel_ip6_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
return -EINVAL;
if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
memcpy(&info->key.u.ipv6.src,
nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
sizeof(struct in6_addr));
}
if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
memcpy(&info->key.u.ipv6.dst,
nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
sizeof(struct in6_addr));
}
if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);
info->mode |= IP_TUNNEL_INFO_IPV6;
return 0;
}
static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
[NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 },
};
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
struct nft_tunnel_opts *opts)
{
struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
int err;
err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
nft_tunnel_opts_vxlan_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
return -EINVAL;
opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
opts->len = sizeof(struct vxlan_metadata);
opts->flags = TUNNEL_VXLAN_OPT;
return 0;
}
static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
[NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
};
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
struct nft_tunnel_opts *opts)
{
struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
uint8_t hwid, dir;
int err, version;
err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
attr, nft_tunnel_opts_erspan_policy,
NULL);
if (err < 0)
return err;
if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
return -EINVAL;
version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
switch (version) {
case ERSPAN_VERSION:
if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
return -EINVAL;
opts->u.erspan.u.index =
nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
break;
case ERSPAN_VERSION2:
if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
return -EINVAL;
hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);
set_hwid(&opts->u.erspan.u.md2, hwid);
opts->u.erspan.u.md2.dir = dir;
break;
default:
return -EOPNOTSUPP;
}
opts->u.erspan.version = version;
opts->len = sizeof(struct erspan_metadata);
opts->flags = TUNNEL_ERSPAN_OPT;
return 0;
}
static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
[NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 },
[NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 },
[NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
};
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
struct nft_tunnel_opts *opts)
{
/* opts->len counts bytes, so advance the byte pointer before casting */
struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
int err, data_len;
err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
nft_tunnel_opts_geneve_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
!tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
!tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
return -EINVAL;
attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
data_len = nla_len(attr);
if (data_len % 4)
return -EINVAL;
opts->len += sizeof(*opt) + data_len;
if (opts->len > IP_TUNNEL_OPTS_MAX)
return -EINVAL;
memcpy(opt->opt_data, nla_data(attr), data_len);
opt->length = data_len / 4;
opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
opts->flags = TUNNEL_GENEVE_OPT;
return 0;
}
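/*
 * Editor's sketch: the geneve option size bookkeeping used above, as a
 * standalone helper. The struct below is an illustrative stand-in, not the
 * real struct geneve_opt: each option is a 4-byte header plus a payload
 * that must be a multiple of 4, and "length" counts the payload in 4-byte
 * words.
 */
#include <stddef.h>
#include <stdint.h>

struct ex_geneve_opt {
	uint16_t opt_class;
	uint8_t type;
	uint8_t length;	/* payload length in 4-byte words */
};

static int ex_geneve_total_len(size_t data_len)
{
	if (data_len % 4)
		return -1; /* mirrors the -EINVAL check above */
	return (int)(sizeof(struct ex_geneve_opt) + data_len);
}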
static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
[NFTA_TUNNEL_KEY_OPTS_UNSPEC] = {
.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
[NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
[NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
[NFTA_TUNNEL_KEY_OPTS_GENEVE] = { .type = NLA_NESTED, },
};
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
const struct nlattr *attr,
struct ip_tunnel_info *info,
struct nft_tunnel_opts *opts)
{
struct nlattr *nla;
__be16 type = 0;
int err, rem;
err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
nft_tunnel_opts_policy, NULL);
if (err < 0)
return err;
nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
switch (nla_type(nla)) {
case NFTA_TUNNEL_KEY_OPTS_VXLAN:
if (type)
return -EINVAL;
err = nft_tunnel_obj_vxlan_init(nla, opts);
if (err)
return err;
type = TUNNEL_VXLAN_OPT;
break;
case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
if (type)
return -EINVAL;
err = nft_tunnel_obj_erspan_init(nla, opts);
if (err)
return err;
type = TUNNEL_ERSPAN_OPT;
break;
case NFTA_TUNNEL_KEY_OPTS_GENEVE:
if (type && type != TUNNEL_GENEVE_OPT)
return -EINVAL;
err = nft_tunnel_obj_geneve_init(nla, opts);
if (err)
return err;
type = TUNNEL_GENEVE_OPT;
break;
default:
return -EOPNOTSUPP;
}
}
return err;
}
static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
[NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, },
[NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, },
[NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, },
[NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
[NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
[NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
[NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
[NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
[NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
};
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_tunnel_obj *priv = nft_obj_data(obj);
struct ip_tunnel_info info;
struct metadata_dst *md;
int err;
if (!tb[NFTA_TUNNEL_KEY_ID])
return -EINVAL;
memset(&info, 0, sizeof(info));
info.mode = IP_TUNNEL_INFO_TX;
info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
if (tb[NFTA_TUNNEL_KEY_IP]) {
err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
if (err < 0)
return err;
} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
if (err < 0)
return err;
} else {
return -EINVAL;
}
if (tb[NFTA_TUNNEL_KEY_SPORT]) {
info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
}
if (tb[NFTA_TUNNEL_KEY_DPORT]) {
info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
}
if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
u32 tun_flags;
tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
if (tun_flags & ~NFT_TUNNEL_F_MASK)
return -EOPNOTSUPP;
if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
info.key.tun_flags &= ~TUNNEL_CSUM;
if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
info.key.tun_flags |= TUNNEL_SEQ;
}
if (tb[NFTA_TUNNEL_KEY_TOS])
info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
if (tb[NFTA_TUNNEL_KEY_TTL])
info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
else
info.key.ttl = U8_MAX;
if (tb[NFTA_TUNNEL_KEY_OPTS]) {
err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
&info, &priv->opts);
if (err < 0)
return err;
}
md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
if (!md)
return -ENOMEM;
memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
if (err < 0) {
metadata_dst_free(md);
return err;
}
#endif
ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
priv->opts.flags);
priv->md = md;
return 0;
}
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_tunnel_obj *priv = nft_obj_data(obj);
struct sk_buff *skb = pkt->skb;
skb_dst_drop(skb);
dst_hold((struct dst_entry *) priv->md);
skb_dst_set(skb, (struct dst_entry *) priv->md);
}
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
struct nlattr *nest;
if (info->mode & IP_TUNNEL_INFO_IPV6) {
nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
if (!nest)
return -1;
if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
&info->key.u.ipv6.src) < 0 ||
nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
&info->key.u.ipv6.dst) < 0 ||
nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
info->key.label)) {
nla_nest_cancel(skb, nest);
return -1;
}
nla_nest_end(skb, nest);
} else {
nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
if (!nest)
return -1;
if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
info->key.u.ipv4.src) < 0 ||
nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
info->key.u.ipv4.dst) < 0) {
nla_nest_cancel(skb, nest);
return -1;
}
nla_nest_end(skb, nest);
}
return 0;
}
static int nft_tunnel_opts_dump(struct sk_buff *skb,
struct nft_tunnel_obj *priv)
{
struct nft_tunnel_opts *opts = &priv->opts;
struct nlattr *nest, *inner;
nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
if (!nest)
return -1;
if (opts->flags & TUNNEL_VXLAN_OPT) {
inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
if (!inner)
goto failure;
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
htonl(opts->u.vxlan.gbp)))
goto inner_failure;
nla_nest_end(skb, inner);
} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
if (!inner)
goto failure;
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
htonl(opts->u.erspan.version)))
goto inner_failure;
switch (opts->u.erspan.version) {
case ERSPAN_VERSION:
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
opts->u.erspan.u.index))
goto inner_failure;
break;
case ERSPAN_VERSION2:
if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
get_hwid(&opts->u.erspan.u.md2)) ||
nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
opts->u.erspan.u.md2.dir))
goto inner_failure;
break;
}
nla_nest_end(skb, inner);
} else if (opts->flags & TUNNEL_GENEVE_OPT) {
struct geneve_opt *opt;
int offset = 0;
inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
if (!inner)
goto failure;
while (opts->len > offset) {
opt = (struct geneve_opt *)(opts->u.data + offset); /* offset is in bytes */
if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
opt->opt_class) ||
nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
opt->type) ||
nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
opt->length * 4, opt->opt_data))
goto inner_failure;
offset += sizeof(*opt) + opt->length * 4;
}
nla_nest_end(skb, inner);
}
nla_nest_end(skb, nest);
return 0;
inner_failure:
nla_nest_cancel(skb, inner);
failure:
nla_nest_cancel(skb, nest);
return -1;
}
static int nft_tunnel_ports_dump(struct sk_buff *skb,
struct ip_tunnel_info *info)
{
if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
return -1;
return 0;
}
static int nft_tunnel_flags_dump(struct sk_buff *skb,
struct ip_tunnel_info *info)
{
u32 flags = 0;
if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
if (!(info->key.tun_flags & TUNNEL_CSUM))
flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
if (info->key.tun_flags & TUNNEL_SEQ)
flags |= NFT_TUNNEL_F_SEQ_NUMBER;
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
return -1;
return 0;
}
static int nft_tunnel_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
struct nft_tunnel_obj *priv = nft_obj_data(obj);
struct ip_tunnel_info *info = &priv->md->u.tun_info;
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
tunnel_id_to_key32(info->key.tun_id)) ||
nft_tunnel_ip_dump(skb, info) < 0 ||
nft_tunnel_ports_dump(skb, info) < 0 ||
nft_tunnel_flags_dump(skb, info) < 0 ||
nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
nft_tunnel_opts_dump(skb, priv) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_tunnel_obj *priv = nft_obj_data(obj);
metadata_dst_free(priv->md);
}
static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
.type = &nft_tunnel_obj_type,
.size = sizeof(struct nft_tunnel_obj),
.eval = nft_tunnel_obj_eval,
.init = nft_tunnel_obj_init,
.destroy = nft_tunnel_obj_destroy,
.dump = nft_tunnel_obj_dump,
};
static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
.type = NFT_OBJECT_TUNNEL,
.ops = &nft_tunnel_obj_ops,
.maxattr = NFTA_TUNNEL_KEY_MAX,
.policy = nft_tunnel_key_policy,
.owner = THIS_MODULE,
};
static int __init nft_tunnel_module_init(void)
{
int err;
err = nft_register_expr(&nft_tunnel_type);
if (err < 0)
return err;
err = nft_register_obj(&nft_tunnel_obj_type);
if (err < 0)
nft_unregister_expr(&nft_tunnel_type);
return err;
}
static void __exit nft_tunnel_module_exit(void)
{
nft_unregister_obj(&nft_tunnel_obj_type);
nft_unregister_expr(&nft_tunnel_type);
}
module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");
| linux-master | net/netfilter/nft_tunnel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
* (C) 2011 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_nat.h>
static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
{
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
if (mr->rangesize != 1) {
pr_info_ratelimited("multiple ranges no longer supported\n");
return -EINVAL;
}
return nf_ct_netns_get(par->net, par->family);
}
static int xt_nat_checkentry(const struct xt_tgchk_param *par)
{
return nf_ct_netns_get(par->net, par->family);
}
static void xt_nat_destroy(const struct xt_tgdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static void xt_nat_convert_range(struct nf_nat_range2 *dst,
const struct nf_nat_ipv4_range *src)
{
memset(&dst->min_addr, 0, sizeof(dst->min_addr));
memset(&dst->max_addr, 0, sizeof(dst->max_addr));
memset(&dst->base_proto, 0, sizeof(dst->base_proto));
dst->flags = src->flags;
dst->min_addr.ip = src->min_ip;
dst->max_addr.ip = src->max_ip;
dst->min_proto = src->min;
dst->max_proto = src->max;
}
static unsigned int
xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
struct nf_nat_range2 range;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
WARN_ON(!(ct != NULL &&
(ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
ctinfo == IP_CT_RELATED_REPLY)));
xt_nat_convert_range(&range, &mr->range[0]);
return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}
static unsigned int
xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
struct nf_nat_range2 range;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
WARN_ON(!(ct != NULL &&
(ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)));
xt_nat_convert_range(&range, &mr->range[0]);
return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
static unsigned int
xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct nf_nat_range *range_v1 = par->targinfo;
struct nf_nat_range2 range;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
WARN_ON(!(ct != NULL &&
(ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
ctinfo == IP_CT_RELATED_REPLY)));
memcpy(&range, range_v1, sizeof(*range_v1));
memset(&range.base_proto, 0, sizeof(range.base_proto));
return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}
static unsigned int
xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct nf_nat_range *range_v1 = par->targinfo;
struct nf_nat_range2 range;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
WARN_ON(!(ct != NULL &&
(ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)));
memcpy(&range, range_v1, sizeof(*range_v1));
memset(&range.base_proto, 0, sizeof(range.base_proto));
return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
static unsigned int
xt_snat_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct nf_nat_range2 *range = par->targinfo;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
WARN_ON(!(ct != NULL &&
(ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
ctinfo == IP_CT_RELATED_REPLY)));
return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC);
}
static unsigned int
xt_dnat_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct nf_nat_range2 *range = par->targinfo;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
WARN_ON(!(ct != NULL &&
(ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)));
return nf_nat_setup_info(ct, range, NF_NAT_MANIP_DST);
}
static struct xt_target xt_nat_target_reg[] __read_mostly = {
{
.name = "SNAT",
.revision = 0,
.checkentry = xt_nat_checkentry_v0,
.destroy = xt_nat_destroy,
.target = xt_snat_target_v0,
.targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.family = NFPROTO_IPV4,
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
.name = "DNAT",
.revision = 0,
.checkentry = xt_nat_checkentry_v0,
.destroy = xt_nat_destroy,
.target = xt_dnat_target_v0,
.targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.family = NFPROTO_IPV4,
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
{
.name = "SNAT",
.revision = 1,
.checkentry = xt_nat_checkentry,
.destroy = xt_nat_destroy,
.target = xt_snat_target_v1,
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
.name = "DNAT",
.revision = 1,
.checkentry = xt_nat_checkentry,
.destroy = xt_nat_destroy,
.target = xt_dnat_target_v1,
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
{
.name = "SNAT",
.revision = 2,
.checkentry = xt_nat_checkentry,
.destroy = xt_nat_destroy,
.target = xt_snat_target_v2,
.targetsize = sizeof(struct nf_nat_range2),
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
.name = "DNAT",
.revision = 2,
.checkentry = xt_nat_checkentry,
.destroy = xt_nat_destroy,
.target = xt_dnat_target_v2,
.targetsize = sizeof(struct nf_nat_range2),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
};
static int __init xt_nat_init(void)
{
return xt_register_targets(xt_nat_target_reg,
ARRAY_SIZE(xt_nat_target_reg));
}
static void __exit xt_nat_exit(void)
{
xt_unregister_targets(xt_nat_target_reg, ARRAY_SIZE(xt_nat_target_reg));
}
module_init(xt_nat_init);
module_exit(xt_nat_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS("ipt_SNAT");
MODULE_ALIAS("ipt_DNAT");
MODULE_ALIAS("ip6t_SNAT");
MODULE_ALIAS("ip6t_DNAT");
MODULE_DESCRIPTION("SNAT and DNAT targets support");
| linux-master | net/netfilter/xt_nat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2006 Patrick McHardy <[email protected]>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
*
* This is a replacement of the old ipt_recent module, which carried the
* following copyright notice:
*
* Author: Stephen Frost <[email protected]>
* Copyright 2002-2003, Stephen Frost, 2.5.x port by [email protected]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_recent.h>
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_recent");
MODULE_ALIAS("ip6t_recent");
static unsigned int ip_list_tot __read_mostly = 100;
static unsigned int ip_list_hash_size __read_mostly;
static unsigned int ip_list_perms __read_mostly = 0644;
static unsigned int ip_list_uid __read_mostly;
static unsigned int ip_list_gid __read_mostly;
module_param(ip_list_tot, uint, 0400);
module_param(ip_list_hash_size, uint, 0400);
module_param(ip_list_perms, uint, 0400);
module_param(ip_list_uid, uint, 0644);
module_param(ip_list_gid, uint, 0644);
MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files");
/* retained for backwards compatibility */
static unsigned int ip_pkt_list_tot __read_mostly;
module_param(ip_pkt_list_tot, uint, 0400);
MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
#define XT_RECENT_MAX_NSTAMPS 256
struct recent_entry {
struct list_head list;
struct list_head lru_list;
union nf_inet_addr addr;
u_int16_t family;
u_int8_t ttl;
u_int8_t index;
u_int16_t nstamps;
unsigned long stamps[];
};
struct recent_table {
struct list_head list;
char name[XT_RECENT_NAME_LEN];
union nf_inet_addr mask;
unsigned int refcnt;
unsigned int entries;
u8 nstamps_max_mask;
struct list_head lru_list;
struct list_head iphash[];
};
struct recent_net {
struct list_head tables;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *xt_recent;
#endif
};
static unsigned int recent_net_id __read_mostly;
static inline struct recent_net *recent_pernet(struct net *net)
{
return net_generic(net, recent_net_id);
}
static DEFINE_SPINLOCK(recent_lock);
static DEFINE_MUTEX(recent_mutex);
#ifdef CONFIG_PROC_FS
static const struct proc_ops recent_mt_proc_ops;
#endif
static u_int32_t hash_rnd __read_mostly;
static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr)
{
return jhash_1word((__force u32)addr->ip, hash_rnd) &
(ip_list_hash_size - 1);
}
static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr)
{
return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) &
(ip_list_hash_size - 1);
}
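/*
 * Editor's sketch (userspace): masking with (ip_list_hash_size - 1) above
 * is only a valid modulo because the table size is a power of two; see
 * recent_mt_init(), which sets it to 1 << fls(ip_list_tot).
 */
#include <assert.h>

static unsigned int ex_bucket_of(unsigned int hash, unsigned int size)
{
	assert(size && (size & (size - 1)) == 0); /* power of two */
	return hash & (size - 1);
}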
static struct recent_entry *
recent_entry_lookup(const struct recent_table *table,
const union nf_inet_addr *addrp, u_int16_t family,
u_int8_t ttl)
{
struct recent_entry *e;
unsigned int h;
if (family == NFPROTO_IPV4)
h = recent_entry_hash4(addrp);
else
h = recent_entry_hash6(addrp);
list_for_each_entry(e, &table->iphash[h], list)
if (e->family == family &&
memcmp(&e->addr, addrp, sizeof(e->addr)) == 0 &&
(ttl == e->ttl || ttl == 0 || e->ttl == 0))
return e;
return NULL;
}
static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
{
list_del(&e->list);
list_del(&e->lru_list);
kfree(e);
t->entries--;
}
/*
* Drop entries whose timestamps are older than 'time'.
*/
static void recent_entry_reap(struct recent_table *t, unsigned long time,
struct recent_entry *working, bool update)
{
struct recent_entry *e;
/*
* The head of the LRU list is always the oldest entry.
*/
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
/*
* Do not reap the entry that is about to be updated.
*/
if (e == working && update)
return;
/*
* The last time stamp is the most recent.
*/
if (time_after(time, e->stamps[e->index-1]))
recent_entry_remove(t, e);
}
static struct recent_entry *
recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
u_int16_t family, u_int8_t ttl)
{
struct recent_entry *e;
unsigned int nstamps_max = t->nstamps_max_mask;
if (t->entries >= ip_list_tot) {
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
recent_entry_remove(t, e);
}
nstamps_max += 1;
e = kmalloc(struct_size(e, stamps, nstamps_max), GFP_ATOMIC);
if (e == NULL)
return NULL;
memcpy(&e->addr, addr, sizeof(e->addr));
e->ttl = ttl;
e->stamps[0] = jiffies;
e->nstamps = 1;
e->index = 1;
e->family = family;
if (family == NFPROTO_IPV4)
list_add_tail(&e->list, &t->iphash[recent_entry_hash4(addr)]);
else
list_add_tail(&e->list, &t->iphash[recent_entry_hash6(addr)]);
list_add_tail(&e->lru_list, &t->lru_list);
t->entries++;
return e;
}
static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
{
e->index &= t->nstamps_max_mask;
e->stamps[e->index++] = jiffies;
if (e->index > e->nstamps)
e->nstamps = e->index;
list_move_tail(&e->lru_list, &t->lru_list);
}
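/*
 * Editor's sketch: the power-of-two timestamp ring used above, in standalone
 * form. nstamps_max_mask is (capacity - 1), so masking the index wraps it in
 * place; a capacity of 8 is assumed here.
 */
#define EX_CAP_MASK 7u /* capacity 8, standing in for nstamps_max_mask */

struct ex_stamp_ring {
	unsigned long stamps[EX_CAP_MASK + 1];
	unsigned int index;	/* next slot, 1-based like e->index */
	unsigned int nstamps;	/* number of valid entries */
};

static void ex_ring_push(struct ex_stamp_ring *r, unsigned long now)
{
	r->index &= EX_CAP_MASK; /* wrap before writing */
	r->stamps[r->index++] = now;
	if (r->index > r->nstamps)
		r->nstamps = r->index;
}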
static struct recent_table *recent_table_lookup(struct recent_net *recent_net,
const char *name)
{
struct recent_table *t;
list_for_each_entry(t, &recent_net->tables, list)
if (!strcmp(t->name, name))
return t;
return NULL;
}
static void recent_table_flush(struct recent_table *t)
{
struct recent_entry *e, *next;
unsigned int i;
for (i = 0; i < ip_list_hash_size; i++)
list_for_each_entry_safe(e, next, &t->iphash[i], list)
recent_entry_remove(t, e);
}
static bool
recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = xt_net(par);
struct recent_net *recent_net = recent_pernet(net);
const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
struct recent_table *t;
struct recent_entry *e;
union nf_inet_addr addr = {}, addr_mask;
u_int8_t ttl;
bool ret = info->invert;
if (xt_family(par) == NFPROTO_IPV4) {
const struct iphdr *iph = ip_hdr(skb);
if (info->side == XT_RECENT_DEST)
addr.ip = iph->daddr;
else
addr.ip = iph->saddr;
ttl = iph->ttl;
} else {
const struct ipv6hdr *iph = ipv6_hdr(skb);
if (info->side == XT_RECENT_DEST)
memcpy(&addr.in6, &iph->daddr, sizeof(addr.in6));
else
memcpy(&addr.in6, &iph->saddr, sizeof(addr.in6));
ttl = iph->hop_limit;
}
/* use TTL as seen before forwarding */
if (xt_out(par) != NULL &&
(!skb->sk || !net_eq(net, sock_net(skb->sk))))
ttl++;
spin_lock_bh(&recent_lock);
t = recent_table_lookup(recent_net, info->name);
nf_inet_addr_mask(&addr, &addr_mask, &t->mask);
e = recent_entry_lookup(t, &addr_mask, xt_family(par),
(info->check_set & XT_RECENT_TTL) ? ttl : 0);
if (e == NULL) {
if (!(info->check_set & XT_RECENT_SET))
goto out;
e = recent_entry_init(t, &addr_mask, xt_family(par), ttl);
if (e == NULL)
par->hotdrop = true;
ret = !ret;
goto out;
}
if (info->check_set & XT_RECENT_SET)
ret = !ret;
else if (info->check_set & XT_RECENT_REMOVE) {
recent_entry_remove(t, e);
ret = !ret;
} else if (info->check_set & (XT_RECENT_CHECK | XT_RECENT_UPDATE)) {
unsigned long time = jiffies - info->seconds * HZ;
unsigned int i, hits = 0;
for (i = 0; i < e->nstamps; i++) {
if (info->seconds && time_after(time, e->stamps[i]))
continue;
if (!info->hit_count || ++hits >= info->hit_count) {
ret = !ret;
break;
}
}
/* info->seconds must be non-zero */
if (info->check_set & XT_RECENT_REAP)
recent_entry_reap(t, time, e,
info->check_set & XT_RECENT_UPDATE && ret);
}
if (info->check_set & XT_RECENT_SET ||
(info->check_set & XT_RECENT_UPDATE && ret)) {
recent_entry_update(t, e);
e->ttl = ttl;
}
out:
spin_unlock_bh(&recent_lock);
return ret;
}
static void recent_table_free(void *addr)
{
kvfree(addr);
}
static int recent_mt_check(const struct xt_mtchk_param *par,
const struct xt_recent_mtinfo_v1 *info)
{
struct recent_net *recent_net = recent_pernet(par->net);
struct recent_table *t;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *pde;
kuid_t uid;
kgid_t gid;
#endif
unsigned int nstamp_mask;
unsigned int i;
int ret = -EINVAL;
net_get_random_once(&hash_rnd, sizeof(hash_rnd));
if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
pr_info_ratelimited("Unsupported userspace flags (%08x)\n",
info->check_set);
return -EINVAL;
}
if (hweight8(info->check_set &
(XT_RECENT_SET | XT_RECENT_REMOVE |
XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
return -EINVAL;
if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
(info->seconds || info->hit_count ||
(info->check_set & XT_RECENT_MODIFIERS)))
return -EINVAL;
if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
return -EINVAL;
if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
pr_info_ratelimited("hitcount (%u) is larger than allowed maximum (%u)\n",
info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
return -EINVAL;
}
ret = xt_check_proc_name(info->name, sizeof(info->name));
if (ret)
return ret;
if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
else if (info->hit_count)
nstamp_mask = roundup_pow_of_two(info->hit_count) - 1;
else
nstamp_mask = 32 - 1;
mutex_lock(&recent_mutex);
t = recent_table_lookup(recent_net, info->name);
if (t != NULL) {
if (nstamp_mask > t->nstamps_max_mask) {
spin_lock_bh(&recent_lock);
recent_table_flush(t);
t->nstamps_max_mask = nstamp_mask;
spin_unlock_bh(&recent_lock);
}
t->refcnt++;
ret = 0;
goto out;
}
t = kvzalloc(struct_size(t, iphash, ip_list_hash_size), GFP_KERNEL);
if (t == NULL) {
ret = -ENOMEM;
goto out;
}
t->refcnt = 1;
t->nstamps_max_mask = nstamp_mask;
memcpy(&t->mask, &info->mask, sizeof(t->mask));
strcpy(t->name, info->name);
INIT_LIST_HEAD(&t->lru_list);
for (i = 0; i < ip_list_hash_size; i++)
INIT_LIST_HEAD(&t->iphash[i]);
#ifdef CONFIG_PROC_FS
uid = make_kuid(&init_user_ns, ip_list_uid);
gid = make_kgid(&init_user_ns, ip_list_gid);
if (!uid_valid(uid) || !gid_valid(gid)) {
recent_table_free(t);
ret = -EINVAL;
goto out;
}
pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
&recent_mt_proc_ops, t);
if (pde == NULL) {
recent_table_free(t);
ret = -ENOMEM;
goto out;
}
proc_set_user(pde, uid, gid);
#endif
spin_lock_bh(&recent_lock);
list_add_tail(&t->list, &recent_net->tables);
spin_unlock_bh(&recent_lock);
ret = 0;
out:
mutex_unlock(&recent_mutex);
return ret;
}
static int recent_mt_check_v0(const struct xt_mtchk_param *par)
{
const struct xt_recent_mtinfo_v0 *info_v0 = par->matchinfo;
struct xt_recent_mtinfo_v1 info_v1;
/* Copy revision 0 structure to revision 1 */
memcpy(&info_v1, info_v0, sizeof(struct xt_recent_mtinfo));
/* Set default mask to ensure backward compatible behaviour */
memset(info_v1.mask.all, 0xFF, sizeof(info_v1.mask.all));
return recent_mt_check(par, &info_v1);
}
static int recent_mt_check_v1(const struct xt_mtchk_param *par)
{
return recent_mt_check(par, par->matchinfo);
}
static void recent_mt_destroy(const struct xt_mtdtor_param *par)
{
struct recent_net *recent_net = recent_pernet(par->net);
const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
struct recent_table *t;
mutex_lock(&recent_mutex);
t = recent_table_lookup(recent_net, info->name);
if (--t->refcnt == 0) {
spin_lock_bh(&recent_lock);
list_del(&t->list);
spin_unlock_bh(&recent_lock);
#ifdef CONFIG_PROC_FS
if (recent_net->xt_recent != NULL)
remove_proc_entry(t->name, recent_net->xt_recent);
#endif
recent_table_flush(t);
recent_table_free(t);
}
mutex_unlock(&recent_mutex);
}
#ifdef CONFIG_PROC_FS
struct recent_iter_state {
const struct recent_table *table;
unsigned int bucket;
};
static void *recent_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(recent_lock)
{
struct recent_iter_state *st = seq->private;
const struct recent_table *t = st->table;
struct recent_entry *e;
loff_t p = *pos;
spin_lock_bh(&recent_lock);
for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)
list_for_each_entry(e, &t->iphash[st->bucket], list)
if (p-- == 0)
return e;
return NULL;
}
static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct recent_iter_state *st = seq->private;
const struct recent_table *t = st->table;
const struct recent_entry *e = v;
const struct list_head *head = e->list.next;
(*pos)++;
while (head == &t->iphash[st->bucket]) {
if (++st->bucket >= ip_list_hash_size)
return NULL;
head = t->iphash[st->bucket].next;
}
return list_entry(head, struct recent_entry, list);
}
static void recent_seq_stop(struct seq_file *s, void *v)
__releases(recent_lock)
{
spin_unlock_bh(&recent_lock);
}
static int recent_seq_show(struct seq_file *seq, void *v)
{
const struct recent_entry *e = v;
struct recent_iter_state *st = seq->private;
const struct recent_table *t = st->table;
unsigned int i;
i = (e->index - 1) & t->nstamps_max_mask;
if (e->family == NFPROTO_IPV4)
seq_printf(seq, "src=%pI4 ttl: %u last_seen: %lu oldest_pkt: %u",
&e->addr.ip, e->ttl, e->stamps[i], e->index);
else
seq_printf(seq, "src=%pI6 ttl: %u last_seen: %lu oldest_pkt: %u",
&e->addr.in6, e->ttl, e->stamps[i], e->index);
for (i = 0; i < e->nstamps; i++)
seq_printf(seq, "%s %lu", i ? "," : "", e->stamps[i]);
seq_putc(seq, '\n');
return 0;
}
static const struct seq_operations recent_seq_ops = {
.start = recent_seq_start,
.next = recent_seq_next,
.stop = recent_seq_stop,
.show = recent_seq_show,
};
static int recent_seq_open(struct inode *inode, struct file *file)
{
struct recent_iter_state *st;
st = __seq_open_private(file, &recent_seq_ops, sizeof(*st));
if (st == NULL)
return -ENOMEM;
st->table = pde_data(inode);
return 0;
}
static ssize_t
recent_mt_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *loff)
{
struct recent_table *t = pde_data(file_inode(file));
struct recent_entry *e;
char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
const char *c = buf;
union nf_inet_addr addr = {};
u_int16_t family;
bool add, succ;
if (size == 0)
return 0;
if (size > sizeof(buf))
size = sizeof(buf);
if (copy_from_user(buf, input, size) != 0)
return -EFAULT;
/* Strict protocol! */
if (*loff != 0)
return -ESPIPE;
switch (*c) {
case '/': /* flush table */
spin_lock_bh(&recent_lock);
recent_table_flush(t);
spin_unlock_bh(&recent_lock);
return size;
case '-': /* remove address */
add = false;
break;
case '+': /* add address */
add = true;
break;
default:
pr_info_ratelimited("Need \"+ip\", \"-ip\" or \"/\"\n");
return -EINVAL;
}
++c;
--size;
if (strnchr(c, size, ':') != NULL) {
family = NFPROTO_IPV6;
succ = in6_pton(c, size, (void *)&addr, '\n', NULL);
} else {
family = NFPROTO_IPV4;
succ = in4_pton(c, size, (void *)&addr, '\n', NULL);
}
if (!succ)
return -EINVAL;
spin_lock_bh(&recent_lock);
e = recent_entry_lookup(t, &addr, family, 0);
if (e == NULL) {
if (add)
recent_entry_init(t, &addr, family, 0);
} else {
if (add)
recent_entry_update(t, e);
else
recent_entry_remove(t, e);
}
spin_unlock_bh(&recent_lock);
/* Note we removed one above */
*loff += size + 1;
return size + 1;
}
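/*
 * Editor's sketch (userspace): driving the /proc interface implemented
 * above. The table name "badguys" is hypothetical; it must match the --name
 * option of the iptables rule that created the table. One command per
 * open(): the write handler rejects writes at a nonzero offset with -ESPIPE.
 */
#include <stdio.h>

static int ex_recent_cmd(const char *cmd)
{
	FILE *f = fopen("/proc/net/xt_recent/badguys", "w");
	int ok;

	if (!f)
		return -1;
	ok = fputs(cmd, f) >= 0;
	fclose(f);
	return ok ? 0 : -1;
}

/*
 * usage:
 *	ex_recent_cmd("+192.0.2.1\n");	add an address
 *	ex_recent_cmd("-192.0.2.1\n");	remove it again
 *	ex_recent_cmd("/\n");		flush the whole table
 */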
static const struct proc_ops recent_mt_proc_ops = {
.proc_open = recent_seq_open,
.proc_read = seq_read,
.proc_write = recent_mt_proc_write,
.proc_release = seq_release_private,
.proc_lseek = seq_lseek,
};
static int __net_init recent_proc_net_init(struct net *net)
{
struct recent_net *recent_net = recent_pernet(net);
recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
if (!recent_net->xt_recent)
return -ENOMEM;
return 0;
}
static void __net_exit recent_proc_net_exit(struct net *net)
{
struct recent_net *recent_net = recent_pernet(net);
struct recent_table *t;
/* recent_net_exit() is called before recent_mt_destroy(). Make sure
* that the parent xt_recent proc entry is empty before trying to
* remove it.
*/
spin_lock_bh(&recent_lock);
list_for_each_entry(t, &recent_net->tables, list)
remove_proc_entry(t->name, recent_net->xt_recent);
recent_net->xt_recent = NULL;
spin_unlock_bh(&recent_lock);
remove_proc_entry("xt_recent", net->proc_net);
}
#else
static inline int recent_proc_net_init(struct net *net)
{
return 0;
}
static inline void recent_proc_net_exit(struct net *net)
{
}
#endif /* CONFIG_PROC_FS */
static int __net_init recent_net_init(struct net *net)
{
struct recent_net *recent_net = recent_pernet(net);
INIT_LIST_HEAD(&recent_net->tables);
return recent_proc_net_init(net);
}
static void __net_exit recent_net_exit(struct net *net)
{
recent_proc_net_exit(net);
}
static struct pernet_operations recent_net_ops = {
.init = recent_net_init,
.exit = recent_net_exit,
.id = &recent_net_id,
.size = sizeof(struct recent_net),
};
static struct xt_match recent_mt_reg[] __read_mostly = {
{
.name = "recent",
.revision = 0,
.family = NFPROTO_IPV4,
.match = recent_mt,
.matchsize = sizeof(struct xt_recent_mtinfo),
.checkentry = recent_mt_check_v0,
.destroy = recent_mt_destroy,
.me = THIS_MODULE,
},
{
.name = "recent",
.revision = 0,
.family = NFPROTO_IPV6,
.match = recent_mt,
.matchsize = sizeof(struct xt_recent_mtinfo),
.checkentry = recent_mt_check_v0,
.destroy = recent_mt_destroy,
.me = THIS_MODULE,
},
{
.name = "recent",
.revision = 1,
.family = NFPROTO_IPV4,
.match = recent_mt,
.matchsize = sizeof(struct xt_recent_mtinfo_v1),
.checkentry = recent_mt_check_v1,
.destroy = recent_mt_destroy,
.me = THIS_MODULE,
},
{
.name = "recent",
.revision = 1,
.family = NFPROTO_IPV6,
.match = recent_mt,
.matchsize = sizeof(struct xt_recent_mtinfo_v1),
.checkentry = recent_mt_check_v1,
.destroy = recent_mt_destroy,
.me = THIS_MODULE,
}
};
static int __init recent_mt_init(void)
{
int err;
BUILD_BUG_ON_NOT_POWER_OF_2(XT_RECENT_MAX_NSTAMPS);
if (!ip_list_tot || ip_pkt_list_tot >= XT_RECENT_MAX_NSTAMPS)
return -EINVAL;
ip_list_hash_size = 1 << fls(ip_list_tot);
err = register_pernet_subsys(&recent_net_ops);
if (err)
return err;
err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
if (err)
unregister_pernet_subsys(&recent_net_ops);
return err;
}
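/* Worked example of the sizing above: with the default ip_list_tot of 100,
 * fls(100) = 7 (100 = 0b1100100), so the hash table gets 1 << 7 = 128
 * buckets, i.e. the configured count is rounded up to the next power of two.
 */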
static void __exit recent_mt_exit(void)
{
xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
unregister_pernet_subsys(&recent_net_ops);
}
module_init(recent_mt_init);
module_exit(recent_mt_exit);
| linux-master | net/netfilter/xt_recent.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* test/set flag bits stored in conntrack extension area.
*
* (C) 2013 Astaro GmbH & Co KG
*/
#include <linux/export.h>
#include <linux/types.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
static DEFINE_SPINLOCK(nf_connlabels_lock);
static int replace_u32(u32 *address, u32 mask, u32 new)
{
u32 old, tmp;
do {
old = *address;
tmp = (old & mask) ^ new;
if (old == tmp)
return 0;
} while (cmpxchg(address, old, tmp) != old);
return 1;
}
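/* Worked example of the bit manipulation above, assuming the replacement
 * bits lie outside the preserved mask. Here "keep" plays the role of the
 * "mask" parameter, i.e. the bits retained from the old value:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t old  = 0xf0f0f0f0u;
	uint32_t keep = 0xffffff00u;	/* preserve all but the low byte */
	uint32_t repl = 0x000000abu;	/* new bits, confined to ~keep */

	assert(((old & keep) ^ repl) == 0xf0f0f0abu);
	return 0;
}
#endif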
int nf_connlabels_replace(struct nf_conn *ct,
const u32 *data,
const u32 *mask, unsigned int words32)
{
struct nf_conn_labels *labels;
unsigned int size, i;
int changed = 0;
u32 *dst;
labels = nf_ct_labels_find(ct);
if (!labels)
return -ENOSPC;
size = sizeof(labels->bits);
if (size < (words32 * sizeof(u32)))
words32 = size / sizeof(u32);
dst = (u32 *) labels->bits;
for (i = 0; i < words32; i++)
changed |= replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]);
size /= sizeof(u32);
for (i = words32; i < size; i++) /* pad */
replace_u32(&dst[i], 0, 0);
if (changed)
nf_conntrack_event_cache(IPCT_LABEL, ct);
return 0;
}
EXPORT_SYMBOL_GPL(nf_connlabels_replace);
int nf_connlabels_get(struct net *net, unsigned int bits)
{
if (BIT_WORD(bits) >= NF_CT_LABELS_MAX_SIZE / sizeof(long))
return -ERANGE;
spin_lock(&nf_connlabels_lock);
net->ct.labels_used++;
spin_unlock(&nf_connlabels_lock);
BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE / sizeof(long) >= U8_MAX);
return 0;
}
EXPORT_SYMBOL_GPL(nf_connlabels_get);
void nf_connlabels_put(struct net *net)
{
spin_lock(&nf_connlabels_lock);
net->ct.labels_used--;
spin_unlock(&nf_connlabels_lock);
}
EXPORT_SYMBOL_GPL(nf_connlabels_put);
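/* A minimal sketch of the expected pairing, modelled on real users such as
 * xt_connlabel: call nf_connlabels_get() with the highest label bit needed
 * at setup time and nf_connlabels_put() on teardown. The function names
 * here are hypothetical:
 */
#if 0
static int demo_setup(struct net *net)
{
	int err = nf_connlabels_get(net, 127);	/* need label bits 0..127 */

	if (err)
		return err;
	/* ... install rules that test or set connlabels ... */
	return 0;
}

static void demo_teardown(struct net *net)
{
	nf_connlabels_put(net);
}
#endif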
| linux-master | net/netfilter/nf_conntrack_labels.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
* Copyright (c) 2013 Eric Leblond <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
[NFTA_REJECT_TYPE] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 },
};
EXPORT_SYMBOL_GPL(nft_reject_policy);
int nft_reject_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_PRE_ROUTING));
}
EXPORT_SYMBOL_GPL(nft_reject_validate);
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
int icmp_code;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
return -EINVAL;
icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
icmp_code > NFT_REJECT_ICMPX_MAX)
return -EINVAL;
priv->icmp_code = icmp_code;
break;
case NFT_REJECT_TCP_RST:
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(nft_reject_init);
int nft_reject_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_reject *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
goto nla_put_failure;
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
default:
break;
}
return 0;
nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nft_reject_dump);
static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX + 1] = {
[NFT_REJECT_ICMPX_NO_ROUTE] = ICMP_NET_UNREACH,
[NFT_REJECT_ICMPX_PORT_UNREACH] = ICMP_PORT_UNREACH,
[NFT_REJECT_ICMPX_HOST_UNREACH] = ICMP_HOST_UNREACH,
[NFT_REJECT_ICMPX_ADMIN_PROHIBITED] = ICMP_PKT_FILTERED,
};
int nft_reject_icmp_code(u8 code)
{
if (WARN_ON_ONCE(code > NFT_REJECT_ICMPX_MAX))
return ICMP_NET_UNREACH;
return icmp_code_v4[code];
}
EXPORT_SYMBOL_GPL(nft_reject_icmp_code);
static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX + 1] = {
[NFT_REJECT_ICMPX_NO_ROUTE] = ICMPV6_NOROUTE,
[NFT_REJECT_ICMPX_PORT_UNREACH] = ICMPV6_PORT_UNREACH,
[NFT_REJECT_ICMPX_HOST_UNREACH] = ICMPV6_ADDR_UNREACH,
[NFT_REJECT_ICMPX_ADMIN_PROHIBITED] = ICMPV6_ADM_PROHIBITED,
};
int nft_reject_icmpv6_code(u8 code)
{
if (WARN_ON_ONCE(code > NFT_REJECT_ICMPX_MAX))
return ICMPV6_NOROUTE;
return icmp_code_v6[code];
}
EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code);
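/* The two tables above map the family-independent NFT_REJECT_ICMPX_* codes
 * chosen by userspace onto the concrete ICMP/ICMPv6 code at packet time.
 * A small illustrative check of that mapping:
 */
#if 0
#include <assert.h>

int main(void)
{
	assert(nft_reject_icmp_code(NFT_REJECT_ICMPX_PORT_UNREACH) ==
	       ICMP_PORT_UNREACH);
	assert(nft_reject_icmpv6_code(NFT_REJECT_ICMPX_PORT_UNREACH) ==
	       ICMPV6_PORT_UNREACH);
	return 0;
}
#endif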
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("Netfilter x_tables over nftables module");
| linux-master | net/netfilter/nft_reject.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Module for modifying the secmark field of the skb, for use by
* security subsystems.
*
* Based on the nfmark match by:
* (C) 1999-2001 Marc Boucher <[email protected]>
*
* (C) 2006,2008 Red Hat, Inc., James Morris <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_SECMARK.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <[email protected]>");
MODULE_DESCRIPTION("Xtables: packet security mark modification");
MODULE_ALIAS("ipt_SECMARK");
MODULE_ALIAS("ip6t_SECMARK");
static u8 mode;
static unsigned int
secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
{
u32 secmark = 0;
switch (mode) {
case SECMARK_MODE_SEL:
secmark = info->secid;
break;
default:
BUG();
}
skb->secmark = secmark;
return XT_CONTINUE;
}
static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
{
int err;
info->secctx[SECMARK_SECCTX_MAX - 1] = '\0';
info->secid = 0;
err = security_secctx_to_secid(info->secctx, strlen(info->secctx),
&info->secid);
if (err) {
if (err == -EINVAL)
pr_info_ratelimited("invalid security context \'%s\'\n",
info->secctx);
return err;
}
if (!info->secid) {
pr_info_ratelimited("unable to map security context \'%s\'\n",
info->secctx);
return -ENOENT;
}
err = security_secmark_relabel_packet(info->secid);
if (err) {
pr_info_ratelimited("unable to obtain relabeling permission\n");
return err;
}
security_secmark_refcount_inc();
return 0;
}
static int
secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
{
int err;
if (strcmp(table, "mangle") != 0 &&
strcmp(table, "security") != 0) {
pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
table);
return -EINVAL;
}
if (mode && mode != info->mode) {
pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n",
mode, info->mode);
return -EINVAL;
}
switch (info->mode) {
case SECMARK_MODE_SEL:
break;
default:
pr_info_ratelimited("invalid mode: %hu\n", info->mode);
return -EINVAL;
}
err = checkentry_lsm(info);
if (err)
return err;
if (!mode)
mode = info->mode;
return 0;
}
static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
{
switch (mode) {
case SECMARK_MODE_SEL:
security_secmark_refcount_dec();
}
}
static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
{
struct xt_secmark_target_info *info = par->targinfo;
struct xt_secmark_target_info_v1 newinfo = {
.mode = info->mode,
};
int ret;
memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
ret = secmark_tg_check(par->table, &newinfo);
info->secid = newinfo.secid;
return ret;
}
static unsigned int
secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_secmark_target_info *info = par->targinfo;
struct xt_secmark_target_info_v1 newinfo = {
.secid = info->secid,
};
return secmark_tg(skb, &newinfo);
}
static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
{
return secmark_tg_check(par->table, par->targinfo);
}
static unsigned int
secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
return secmark_tg(skb, par->targinfo);
}
static struct xt_target secmark_tg_reg[] __read_mostly = {
{
.name = "SECMARK",
.revision = 0,
.family = NFPROTO_UNSPEC,
.checkentry = secmark_tg_check_v0,
.destroy = secmark_tg_destroy,
.target = secmark_tg_v0,
.targetsize = sizeof(struct xt_secmark_target_info),
.me = THIS_MODULE,
},
{
.name = "SECMARK",
.revision = 1,
.family = NFPROTO_UNSPEC,
.checkentry = secmark_tg_check_v1,
.destroy = secmark_tg_destroy,
.target = secmark_tg_v1,
.targetsize = sizeof(struct xt_secmark_target_info_v1),
.usersize = offsetof(struct xt_secmark_target_info_v1, secid),
.me = THIS_MODULE,
},
};
static int __init secmark_tg_init(void)
{
return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
}
static void __exit secmark_tg_exit(void)
{
xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
}
module_init(secmark_tg_init);
module_exit(secmark_tg_exit);
| linux-master | net/netfilter/xt_SECMARK.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/mutex.h>
#include <net/sock.h>
#include "nf_internals.h"
/* Sockopts only registered and called from user context, so
net locking would be overkill. Also, [gs]etsockopt calls may
sleep. */
static DEFINE_MUTEX(nf_sockopt_mutex);
static LIST_HEAD(nf_sockopts);
/* Do exclusive ranges overlap? */
static inline int overlap(int min1, int max1, int min2, int max2)
{
return max1 > min2 && min1 < max2;
}
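/* Small sanity checks for the half-open [min, max) interpretation used by
 * overlap() above: ranges that merely touch at an endpoint do not clash.
 * Illustrative only:
 */
#if 0
#include <assert.h>

int main(void)
{
	assert(overlap(0, 10, 5, 15));		/* partial overlap */
	assert(!overlap(0, 10, 10, 20));	/* adjacent: no overlap */
	assert(!overlap(20, 30, 0, 10));	/* disjoint */
	return 0;
}
#endif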
/* Functions to register sockopt ranges (exclusive). */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
struct nf_sockopt_ops *ops;
int ret = 0;
mutex_lock(&nf_sockopt_mutex);
list_for_each_entry(ops, &nf_sockopts, list) {
if (ops->pf == reg->pf
&& (overlap(ops->set_optmin, ops->set_optmax,
reg->set_optmin, reg->set_optmax)
|| overlap(ops->get_optmin, ops->get_optmax,
reg->get_optmin, reg->get_optmax))) {
pr_debug("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
ops->set_optmin, ops->set_optmax,
ops->get_optmin, ops->get_optmax,
reg->set_optmin, reg->set_optmax,
reg->get_optmin, reg->get_optmax);
ret = -EBUSY;
goto out;
}
}
list_add(&reg->list, &nf_sockopts);
out:
mutex_unlock(&nf_sockopt_mutex);
return ret;
}
EXPORT_SYMBOL(nf_register_sockopt);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
mutex_lock(&nf_sockopt_mutex);
list_del(&reg->list);
mutex_unlock(&nf_sockopt_mutex);
}
EXPORT_SYMBOL(nf_unregister_sockopt);
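/* A hedged sketch of how a protocol module fills in nf_sockopt_ops before
 * registering; the option numbers and handler names are hypothetical (see
 * ip_tables.c for a real user). Ranges are half-open, hence 96..99 here:
 */
#if 0
static int demo_set(struct sock *sk, int optval, sockptr_t arg,
		    unsigned int len)
{
	return -ENOPROTOOPT;	/* placeholder */
}

static int demo_get(struct sock *sk, int optval, void __user *user, int *len)
{
	return -ENOPROTOOPT;	/* placeholder */
}

static struct nf_sockopt_ops demo_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= 96,
	.set_optmax	= 100,
	.set		= demo_set,
	.get_optmin	= 96,
	.get_optmax	= 100,
	.get		= demo_get,
	.owner		= THIS_MODULE,
};
/* nf_register_sockopt(&demo_sockopts) claims the range; -EBUSY on clash. */
#endif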
static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf,
int val, int get)
{
struct nf_sockopt_ops *ops;
mutex_lock(&nf_sockopt_mutex);
list_for_each_entry(ops, &nf_sockopts, list) {
if (ops->pf == pf) {
if (!try_module_get(ops->owner))
goto out_nosup;
if (get) {
if (val >= ops->get_optmin &&
val < ops->get_optmax)
goto out;
} else {
if (val >= ops->set_optmin &&
val < ops->set_optmax)
goto out;
}
module_put(ops->owner);
}
}
out_nosup:
ops = ERR_PTR(-ENOPROTOOPT);
out:
mutex_unlock(&nf_sockopt_mutex);
return ops;
}
int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, sockptr_t opt,
unsigned int len)
{
struct nf_sockopt_ops *ops;
int ret;
ops = nf_sockopt_find(sk, pf, val, 0);
if (IS_ERR(ops))
return PTR_ERR(ops);
ret = ops->set(sk, val, opt, len);
module_put(ops->owner);
return ret;
}
EXPORT_SYMBOL(nf_setsockopt);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
int *len)
{
struct nf_sockopt_ops *ops;
int ret;
ops = nf_sockopt_find(sk, pf, val, 1);
if (IS_ERR(ops))
return PTR_ERR(ops);
ret = ops->get(sk, val, opt, len);
module_put(ops->owner);
return ret;
}
EXPORT_SYMBOL(nf_getsockopt);
| linux-master | net/netfilter/nf_sockopt.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <net/protocol.h>
#include <net/netfilter/nf_log.h>
#include "nf_internals.h"
/* Internal logging interface, which relies on the real
LOG target modules */
#define NFLOGGER_NAME_LEN 64
int sysctl_nf_log_all_netns __read_mostly;
EXPORT_SYMBOL(sysctl_nf_log_all_netns);
static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
static DEFINE_MUTEX(nf_log_mutex);
#define nft_log_dereference(logger) \
rcu_dereference_protected(logger, lockdep_is_held(&nf_log_mutex))
static struct nf_logger *__find_logger(int pf, const char *str_logger)
{
struct nf_logger *log;
int i;
for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
if (loggers[pf][i] == NULL)
continue;
log = nft_log_dereference(loggers[pf][i]);
if (!strncasecmp(str_logger, log->name, strlen(log->name)))
return log;
}
return NULL;
}
int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger)
{
const struct nf_logger *log;
if (pf == NFPROTO_UNSPEC || pf >= ARRAY_SIZE(net->nf.nf_loggers))
return -EOPNOTSUPP;
mutex_lock(&nf_log_mutex);
log = nft_log_dereference(net->nf.nf_loggers[pf]);
if (log == NULL)
rcu_assign_pointer(net->nf.nf_loggers[pf], logger);
mutex_unlock(&nf_log_mutex);
return 0;
}
EXPORT_SYMBOL(nf_log_set);
void nf_log_unset(struct net *net, const struct nf_logger *logger)
{
int i;
const struct nf_logger *log;
mutex_lock(&nf_log_mutex);
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
log = nft_log_dereference(net->nf.nf_loggers[i]);
if (log == logger)
RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL);
}
mutex_unlock(&nf_log_mutex);
}
EXPORT_SYMBOL(nf_log_unset);
/* return EEXIST if the same logger is registered, 0 on success. */
int nf_log_register(u_int8_t pf, struct nf_logger *logger)
{
int i;
int ret = 0;
if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers))
return -EINVAL;
mutex_lock(&nf_log_mutex);
if (pf == NFPROTO_UNSPEC) {
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) {
if (rcu_access_pointer(loggers[i][logger->type])) {
ret = -EEXIST;
goto unlock;
}
}
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
rcu_assign_pointer(loggers[i][logger->type], logger);
} else {
if (rcu_access_pointer(loggers[pf][logger->type])) {
ret = -EEXIST;
goto unlock;
}
rcu_assign_pointer(loggers[pf][logger->type], logger);
}
unlock:
mutex_unlock(&nf_log_mutex);
return ret;
}
EXPORT_SYMBOL(nf_log_register);
void nf_log_unregister(struct nf_logger *logger)
{
const struct nf_logger *log;
int i;
mutex_lock(&nf_log_mutex);
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
log = nft_log_dereference(loggers[i][logger->type]);
if (log == logger)
RCU_INIT_POINTER(loggers[i][logger->type], NULL);
}
mutex_unlock(&nf_log_mutex);
synchronize_rcu();
}
EXPORT_SYMBOL(nf_log_unregister);
int nf_log_bind_pf(struct net *net, u_int8_t pf,
const struct nf_logger *logger)
{
if (pf >= ARRAY_SIZE(net->nf.nf_loggers))
return -EINVAL;
mutex_lock(&nf_log_mutex);
if (__find_logger(pf, logger->name) == NULL) {
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
rcu_assign_pointer(net->nf.nf_loggers[pf], logger);
mutex_unlock(&nf_log_mutex);
return 0;
}
EXPORT_SYMBOL(nf_log_bind_pf);
void nf_log_unbind_pf(struct net *net, u_int8_t pf)
{
if (pf >= ARRAY_SIZE(net->nf.nf_loggers))
return;
mutex_lock(&nf_log_mutex);
RCU_INIT_POINTER(net->nf.nf_loggers[pf], NULL);
mutex_unlock(&nf_log_mutex);
}
EXPORT_SYMBOL(nf_log_unbind_pf);
int nf_logger_find_get(int pf, enum nf_log_type type)
{
struct nf_logger *logger;
int ret = -ENOENT;
if (pf == NFPROTO_INET) {
ret = nf_logger_find_get(NFPROTO_IPV4, type);
if (ret < 0)
return ret;
ret = nf_logger_find_get(NFPROTO_IPV6, type);
if (ret < 0) {
nf_logger_put(NFPROTO_IPV4, type);
return ret;
}
return 0;
}
rcu_read_lock();
logger = rcu_dereference(loggers[pf][type]);
if (logger == NULL)
goto out;
if (try_module_get(logger->me))
ret = 0;
out:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(nf_logger_find_get);
void nf_logger_put(int pf, enum nf_log_type type)
{
struct nf_logger *logger;
if (pf == NFPROTO_INET) {
nf_logger_put(NFPROTO_IPV4, type);
nf_logger_put(NFPROTO_IPV6, type);
return;
}
BUG_ON(loggers[pf][type] == NULL);
rcu_read_lock();
logger = rcu_dereference(loggers[pf][type]);
module_put(logger->me);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_logger_put);
void nf_log_packet(struct net *net,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *fmt, ...)
{
va_list args;
char prefix[NF_LOG_PREFIXLEN];
const struct nf_logger *logger;
rcu_read_lock();
if (loginfo != NULL)
logger = rcu_dereference(loggers[pf][loginfo->type]);
else
logger = rcu_dereference(net->nf.nf_loggers[pf]);
if (logger) {
va_start(args, fmt);
vsnprintf(prefix, sizeof(prefix), fmt, args);
va_end(args);
logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_packet);
void nf_log_trace(struct net *net,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo, const char *fmt, ...)
{
va_list args;
char prefix[NF_LOG_PREFIXLEN];
const struct nf_logger *logger;
rcu_read_lock();
logger = rcu_dereference(net->nf.nf_loggers[pf]);
if (logger) {
va_start(args, fmt);
vsnprintf(prefix, sizeof(prefix), fmt, args);
va_end(args);
logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_trace);
#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
struct nf_log_buf {
unsigned int count;
char buf[S_SIZE + 1];
};
static struct nf_log_buf emergency, *emergency_ptr = &emergency;
__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...)
{
va_list args;
int len;
if (likely(m->count < S_SIZE)) {
va_start(args, f);
len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args);
va_end(args);
if (likely(m->count + len < S_SIZE)) {
m->count += len;
return 0;
}
}
m->count = S_SIZE;
printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n");
return -1;
}
EXPORT_SYMBOL_GPL(nf_log_buf_add);
struct nf_log_buf *nf_log_buf_open(void)
{
struct nf_log_buf *m = kmalloc(sizeof(*m), GFP_ATOMIC);
if (unlikely(!m)) {
local_bh_disable();
do {
m = xchg(&emergency_ptr, NULL);
} while (!m);
}
m->count = 0;
return m;
}
EXPORT_SYMBOL_GPL(nf_log_buf_open);
void nf_log_buf_close(struct nf_log_buf *m)
{
m->buf[m->count] = 0;
printk("%s\n", m->buf);
if (likely(m != &emergency))
kfree(m);
else {
emergency_ptr = m;
local_bh_enable();
}
}
EXPORT_SYMBOL_GPL(nf_log_buf_close);
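/* The open/add/close pattern the helpers above provide for the LOG
 * backends: build one log line from multiple printf-style fragments, then
 * emit it as a single printk(). A minimal sketch with illustrative format
 * strings:
 */
#if 0
static void demo_log_skb(const struct sk_buff *skb)
{
	struct nf_log_buf *m = nf_log_buf_open();

	nf_log_buf_add(m, "LEN=%u ", skb->len);
	nf_log_buf_add(m, "PROTO=%04x", ntohs(skb->protocol));
	nf_log_buf_close(m);	/* prints the line, releases the buffer */
}
#endif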
#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
mutex_lock(&nf_log_mutex);
if (*pos >= ARRAY_SIZE(net->nf.nf_loggers))
return NULL;
return pos;
}
static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct net *net = seq_file_net(s);
(*pos)++;
if (*pos >= ARRAY_SIZE(net->nf.nf_loggers))
return NULL;
return pos;
}
static void seq_stop(struct seq_file *s, void *v)
{
mutex_unlock(&nf_log_mutex);
}
static int seq_show(struct seq_file *s, void *v)
{
loff_t *pos = v;
const struct nf_logger *logger;
int i;
struct net *net = seq_file_net(s);
logger = nft_log_dereference(net->nf.nf_loggers[*pos]);
if (!logger)
seq_printf(s, "%2lld NONE (", *pos);
else
seq_printf(s, "%2lld %s (", *pos, logger->name);
if (seq_has_overflowed(s))
return -ENOSPC;
for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
if (loggers[*pos][i] == NULL)
continue;
logger = nft_log_dereference(loggers[*pos][i]);
seq_puts(s, logger->name);
if (i == 0 && loggers[*pos][i + 1] != NULL)
seq_puts(s, ",");
if (seq_has_overflowed(s))
return -ENOSPC;
}
seq_puts(s, ")\n");
if (seq_has_overflowed(s))
return -ENOSPC;
return 0;
}
static const struct seq_operations nflog_seq_ops = {
.start = seq_start,
.next = seq_next,
.stop = seq_stop,
.show = seq_show,
};
#endif /* PROC_FS */
#ifdef CONFIG_SYSCTL
static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
static struct ctl_table_header *nf_log_sysctl_fhdr;
static struct ctl_table nf_log_sysctl_ftable[] = {
{
.procname = "nf_log_all_netns",
.data = &sysctl_nf_log_all_netns,
.maxlen = sizeof(sysctl_nf_log_all_netns),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static int nf_log_proc_dostring(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
const struct nf_logger *logger;
char buf[NFLOGGER_NAME_LEN];
int r = 0;
int tindex = (unsigned long)table->extra1;
struct net *net = table->extra2;
if (write) {
struct ctl_table tmp = *table;
/* proc_dostring() can append to existing strings, so we need to
* initialize it as an empty string.
*/
buf[0] = '\0';
tmp.data = buf;
r = proc_dostring(&tmp, write, buffer, lenp, ppos);
if (r)
return r;
if (!strcmp(buf, "NONE")) {
nf_log_unbind_pf(net, tindex);
return 0;
}
mutex_lock(&nf_log_mutex);
logger = __find_logger(tindex, buf);
if (logger == NULL) {
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
mutex_unlock(&nf_log_mutex);
} else {
struct ctl_table tmp = *table;
tmp.data = buf;
mutex_lock(&nf_log_mutex);
logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
if (!logger)
strscpy(buf, "NONE", sizeof(buf));
else
strscpy(buf, logger->name, sizeof(buf));
mutex_unlock(&nf_log_mutex);
r = proc_dostring(&tmp, write, buffer, lenp, ppos);
}
return r;
}
static int netfilter_log_sysctl_init(struct net *net)
{
int i;
struct ctl_table *table;
table = nf_log_sysctl_table;
if (!net_eq(net, &init_net)) {
table = kmemdup(nf_log_sysctl_table,
sizeof(nf_log_sysctl_table),
GFP_KERNEL);
if (!table)
goto err_alloc;
} else {
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) {
snprintf(nf_log_sysctl_fnames[i],
3, "%d", i);
nf_log_sysctl_table[i].procname =
nf_log_sysctl_fnames[i];
nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN;
nf_log_sysctl_table[i].mode = 0644;
nf_log_sysctl_table[i].proc_handler =
nf_log_proc_dostring;
nf_log_sysctl_table[i].extra1 =
(void *)(unsigned long) i;
}
nf_log_sysctl_fhdr = register_net_sysctl(net, "net/netfilter",
nf_log_sysctl_ftable);
if (!nf_log_sysctl_fhdr)
goto err_freg;
}
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
table[i].extra2 = net;
net->nf.nf_log_dir_header = register_net_sysctl_sz(net,
"net/netfilter/nf_log",
table,
ARRAY_SIZE(nf_log_sysctl_table));
if (!net->nf.nf_log_dir_header)
goto err_reg;
return 0;
err_reg:
if (!net_eq(net, &init_net))
kfree(table);
else
unregister_net_sysctl_table(nf_log_sysctl_fhdr);
err_freg:
err_alloc:
return -ENOMEM;
}
static void netfilter_log_sysctl_exit(struct net *net)
{
struct ctl_table *table;
table = net->nf.nf_log_dir_header->ctl_table_arg;
unregister_net_sysctl_table(net->nf.nf_log_dir_header);
if (!net_eq(net, &init_net))
kfree(table);
else
unregister_net_sysctl_table(nf_log_sysctl_fhdr);
}
#else
static int netfilter_log_sysctl_init(struct net *net)
{
return 0;
}
static void netfilter_log_sysctl_exit(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */
static int __net_init nf_log_net_init(struct net *net)
{
int ret = -ENOMEM;
#ifdef CONFIG_PROC_FS
if (!proc_create_net("nf_log", 0444, net->nf.proc_netfilter,
&nflog_seq_ops, sizeof(struct seq_net_private)))
return ret;
#endif
ret = netfilter_log_sysctl_init(net);
if (ret < 0)
goto out_sysctl;
return 0;
out_sysctl:
#ifdef CONFIG_PROC_FS
remove_proc_entry("nf_log", net->nf.proc_netfilter);
#endif
return ret;
}
static void __net_exit nf_log_net_exit(struct net *net)
{
netfilter_log_sysctl_exit(net);
#ifdef CONFIG_PROC_FS
remove_proc_entry("nf_log", net->nf.proc_netfilter);
#endif
}
static struct pernet_operations nf_log_net_ops = {
.init = nf_log_net_init,
.exit = nf_log_net_exit,
};
int __init netfilter_log_init(void)
{
return register_pernet_subsys(&nf_log_net_ops);
}
| linux-master | net/netfilter/nf_log.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xt_conntrack - Netfilter module to match connection tracking
* information. (Superset of Rusty's minimalistic state match.)
*
* (C) 2001 Marc Boucher ([email protected]).
* (C) 2006-2012 Patrick McHardy <[email protected]>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_conntrack.h>
#include <net/netfilter/nf_conntrack.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <[email protected]>");
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("Xtables: connection tracking state match");
MODULE_ALIAS("ipt_conntrack");
MODULE_ALIAS("ip6t_conntrack");
static bool
conntrack_addrcmp(const union nf_inet_addr *kaddr,
const union nf_inet_addr *uaddr,
const union nf_inet_addr *umask, unsigned int l3proto)
{
if (l3proto == NFPROTO_IPV4)
return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0;
else if (l3proto == NFPROTO_IPV6)
return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6,
&uaddr->in6) == 0;
else
return false;
}
static inline bool
conntrack_mt_origsrc(const struct nf_conn *ct,
const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
&info->origsrc_addr, &info->origsrc_mask, family);
}
static inline bool
conntrack_mt_origdst(const struct nf_conn *ct,
const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
&info->origdst_addr, &info->origdst_mask, family);
}
static inline bool
conntrack_mt_replsrc(const struct nf_conn *ct,
const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
&info->replsrc_addr, &info->replsrc_mask, family);
}
static inline bool
conntrack_mt_repldst(const struct nf_conn *ct,
const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
&info->repldst_addr, &info->repldst_mask, family);
}
static inline bool
ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
const struct nf_conn *ct)
{
const struct nf_conntrack_tuple *tuple;
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
if ((info->match_flags & XT_CONNTRACK_PROTO) &&
(nf_ct_protonum(ct) == info->l4proto) ^
!(info->invert_flags & XT_CONNTRACK_PROTO))
return false;
/* Shortcut to match all recognized protocols by using ->src.all. */
if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
(tuple->src.u.all == info->origsrc_port) ^
!(info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
return false;
if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
(tuple->dst.u.all == info->origdst_port) ^
!(info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
return false;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
(tuple->src.u.all == info->replsrc_port) ^
!(info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
return false;
if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
(tuple->dst.u.all == info->repldst_port) ^
!(info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
return false;
return true;
}
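/* Truth-table check of the "(cond) ^ !(invert)" idiom used above, which
 * evaluates true ("fail the match") exactly when the comparison disagrees
 * with what the rule requested. Illustrative standalone form:
 */
#if 0
#include <assert.h>
#include <stdbool.h>

int main(void)
{
	/* fail = cond ^ !invert */
	assert((true  ^ !false) == false);	/* match, no invert: pass */
	assert((false ^ !false) == true);	/* no match, no invert: fail */
	assert((true  ^ !true)  == true);	/* match, inverted: fail */
	assert((false ^ !true)  == false);	/* no match, inverted: pass */
	return 0;
}
#endif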
static inline bool
port_match(u16 min, u16 max, u16 port, bool invert)
{
return (port >= min && port <= max) ^ invert;
}
static inline bool
ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info,
const struct nf_conn *ct)
{
const struct nf_conntrack_tuple *tuple;
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
if ((info->match_flags & XT_CONNTRACK_PROTO) &&
(nf_ct_protonum(ct) == info->l4proto) ^
!(info->invert_flags & XT_CONNTRACK_PROTO))
return false;
/* Shortcut to match all recognized protocols by using ->src.all. */
if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
!port_match(info->origsrc_port, info->origsrc_port_high,
ntohs(tuple->src.u.all),
info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
return false;
if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
!port_match(info->origdst_port, info->origdst_port_high,
ntohs(tuple->dst.u.all),
info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
return false;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
!port_match(info->replsrc_port, info->replsrc_port_high,
ntohs(tuple->src.u.all),
info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
return false;
if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
!port_match(info->repldst_port, info->repldst_port_high,
ntohs(tuple->dst.u.all),
info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
return false;
return true;
}
static bool
conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
u16 state_mask, u16 status_mask)
{
const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int statebit;
ct = nf_ct_get(skb, &ctinfo);
if (ct)
statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
else if (ctinfo == IP_CT_UNTRACKED)
statebit = XT_CONNTRACK_STATE_UNTRACKED;
else
statebit = XT_CONNTRACK_STATE_INVALID;
if (info->match_flags & XT_CONNTRACK_STATE) {
if (ct != NULL) {
if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
statebit |= XT_CONNTRACK_STATE_SNAT;
if (test_bit(IPS_DST_NAT_BIT, &ct->status))
statebit |= XT_CONNTRACK_STATE_DNAT;
}
if (!!(state_mask & statebit) ^
!(info->invert_flags & XT_CONNTRACK_STATE))
return false;
}
if (ct == NULL)
return info->match_flags & XT_CONNTRACK_STATE;
if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
(CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
!(info->invert_flags & XT_CONNTRACK_DIRECTION))
return false;
if (info->match_flags & XT_CONNTRACK_ORIGSRC)
if (conntrack_mt_origsrc(ct, info, xt_family(par)) ^
!(info->invert_flags & XT_CONNTRACK_ORIGSRC))
return false;
if (info->match_flags & XT_CONNTRACK_ORIGDST)
if (conntrack_mt_origdst(ct, info, xt_family(par)) ^
!(info->invert_flags & XT_CONNTRACK_ORIGDST))
return false;
if (info->match_flags & XT_CONNTRACK_REPLSRC)
if (conntrack_mt_replsrc(ct, info, xt_family(par)) ^
!(info->invert_flags & XT_CONNTRACK_REPLSRC))
return false;
if (info->match_flags & XT_CONNTRACK_REPLDST)
if (conntrack_mt_repldst(ct, info, xt_family(par)) ^
!(info->invert_flags & XT_CONNTRACK_REPLDST))
return false;
if (par->match->revision != 3) {
if (!ct_proto_port_check(info, ct))
return false;
} else {
if (!ct_proto_port_check_v3(par->matchinfo, ct))
return false;
}
if ((info->match_flags & XT_CONNTRACK_STATUS) &&
(!!(status_mask & ct->status) ^
!(info->invert_flags & XT_CONNTRACK_STATUS)))
return false;
if (info->match_flags & XT_CONNTRACK_EXPIRES) {
unsigned long expires = nf_ct_expires(ct) / HZ;
if ((expires >= info->expires_min &&
expires <= info->expires_max) ^
!(info->invert_flags & XT_CONNTRACK_EXPIRES))
return false;
}
return true;
}
static bool
conntrack_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
static bool
conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
static bool
conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_conntrack_mtinfo3 *info = par->matchinfo;
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
int ret;
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static struct xt_match conntrack_mt_reg[] __read_mostly = {
{
.name = "conntrack",
.revision = 1,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo1),
.match = conntrack_mt_v1,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
{
.name = "conntrack",
.revision = 2,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo2),
.match = conntrack_mt_v2,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
{
.name = "conntrack",
.revision = 3,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo3),
.match = conntrack_mt_v3,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
};
static int __init conntrack_mt_init(void)
{
return xt_register_matches(conntrack_mt_reg,
ARRAY_SIZE(conntrack_mt_reg));
}
static void __exit conntrack_mt_exit(void)
{
xt_unregister_matches(conntrack_mt_reg, ARRAY_SIZE(conntrack_mt_reg));
}
module_init(conntrack_mt_init);
module_exit(conntrack_mt_exit);
| linux-master | net/netfilter/xt_conntrack.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
* Copyright (c) 2012 Pablo Neira Ayuso <[email protected]>
* Copyright (c) 2012 Intel Corporation
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_tables.h>
#include <net/ip.h>
struct nft_nat {
u8 sreg_addr_min;
u8 sreg_addr_max;
u8 sreg_proto_min;
u8 sreg_proto_max;
enum nf_nat_manip_type type:8;
u8 family;
u16 flags;
};
static void nft_nat_setup_addr(struct nf_nat_range2 *range,
const struct nft_regs *regs,
const struct nft_nat *priv)
{
switch (priv->family) {
case AF_INET:
range->min_addr.ip = (__force __be32)
regs->data[priv->sreg_addr_min];
range->max_addr.ip = (__force __be32)
regs->data[priv->sreg_addr_max];
break;
case AF_INET6:
memcpy(range->min_addr.ip6, &regs->data[priv->sreg_addr_min],
sizeof(range->min_addr.ip6));
memcpy(range->max_addr.ip6, &regs->data[priv->sreg_addr_max],
sizeof(range->max_addr.ip6));
break;
}
}
static void nft_nat_setup_proto(struct nf_nat_range2 *range,
const struct nft_regs *regs,
const struct nft_nat *priv)
{
range->min_proto.all = (__force __be16)
nft_reg_load16(&regs->data[priv->sreg_proto_min]);
range->max_proto.all = (__force __be16)
nft_reg_load16(&regs->data[priv->sreg_proto_max]);
}
static void nft_nat_setup_netmap(struct nf_nat_range2 *range,
const struct nft_pktinfo *pkt,
const struct nft_nat *priv)
{
struct sk_buff *skb = pkt->skb;
union nf_inet_addr new_addr;
__be32 netmask;
int i, len = 0;
switch (priv->type) {
case NFT_NAT_SNAT:
if (nft_pf(pkt) == NFPROTO_IPV4) {
new_addr.ip = ip_hdr(skb)->saddr;
len = sizeof(struct in_addr);
} else {
new_addr.in6 = ipv6_hdr(skb)->saddr;
len = sizeof(struct in6_addr);
}
break;
case NFT_NAT_DNAT:
if (nft_pf(pkt) == NFPROTO_IPV4) {
new_addr.ip = ip_hdr(skb)->daddr;
len = sizeof(struct in_addr);
} else {
new_addr.in6 = ipv6_hdr(skb)->daddr;
len = sizeof(struct in6_addr);
}
break;
}
for (i = 0; i < len / sizeof(__be32); i++) {
netmask = ~(range->min_addr.ip6[i] ^ range->max_addr.ip6[i]);
new_addr.ip6[i] &= ~netmask;
new_addr.ip6[i] |= range->min_addr.ip6[i] & netmask;
}
range->min_addr = new_addr;
range->max_addr = new_addr;
}
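/* Worked example of the per-word mask derivation above: for a prefix given
 * as a min/max address pair, ~(min ^ max) recovers the prefix mask, so the
 * rewrite keeps the packet's host bits and substitutes the network bits.
 * Illustrative standalone check:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t min = 0x0a000000;	/* 10.0.0.0 */
	uint32_t max = 0x0a0000ff;	/* 10.0.0.255 -> a /24 */
	uint32_t netmask = ~(min ^ max);	/* 0xffffff00 */
	uint32_t pkt = 0xc0a80117;	/* 192.168.1.23 */
	uint32_t mapped = (pkt & ~netmask) | (min & netmask);

	assert(netmask == 0xffffff00);
	assert(mapped == 0x0a000017);	/* 10.0.0.23: host bits kept */
	return 0;
}
#endif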
static void nft_nat_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_nat *priv = nft_expr_priv(expr);
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo);
struct nf_nat_range2 range;
memset(&range, 0, sizeof(range));
if (priv->sreg_addr_min) {
nft_nat_setup_addr(&range, regs, priv);
if (priv->flags & NF_NAT_RANGE_NETMAP)
nft_nat_setup_netmap(&range, pkt, priv);
}
if (priv->sreg_proto_min)
nft_nat_setup_proto(&range, regs, priv);
range.flags = priv->flags;
regs->verdict.code = nf_nat_setup_info(ct, &range, priv->type);
}
static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
[NFTA_NAT_TYPE] = { .type = NLA_U32 },
[NFTA_NAT_FAMILY] = { .type = NLA_U32 },
[NFTA_NAT_REG_ADDR_MIN] = { .type = NLA_U32 },
[NFTA_NAT_REG_ADDR_MAX] = { .type = NLA_U32 },
[NFTA_NAT_REG_PROTO_MIN] = { .type = NLA_U32 },
[NFTA_NAT_REG_PROTO_MAX] = { .type = NLA_U32 },
[NFTA_NAT_FLAGS] =
NLA_POLICY_MASK(NLA_BE32, NF_NAT_RANGE_MASK),
};
static int nft_nat_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
struct nft_nat *priv = nft_expr_priv(expr);
int err;
err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
if (err < 0)
return err;
switch (priv->type) {
case NFT_NAT_SNAT:
err = nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_IN));
break;
case NFT_NAT_DNAT:
err = nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT));
break;
}
return err;
}
static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_nat *priv = nft_expr_priv(expr);
unsigned int alen, plen;
u32 family;
int err;
if (tb[NFTA_NAT_TYPE] == NULL ||
(tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
return -EINVAL;
switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {
case NFT_NAT_SNAT:
priv->type = NF_NAT_MANIP_SRC;
break;
case NFT_NAT_DNAT:
priv->type = NF_NAT_MANIP_DST;
break;
default:
return -EOPNOTSUPP;
}
if (tb[NFTA_NAT_FAMILY] == NULL)
return -EINVAL;
family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
if (ctx->family != NFPROTO_INET && ctx->family != family)
return -EOPNOTSUPP;
switch (family) {
case NFPROTO_IPV4:
alen = sizeof_field(struct nf_nat_range, min_addr.ip);
break;
case NFPROTO_IPV6:
alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
break;
default:
if (tb[NFTA_NAT_REG_ADDR_MIN])
return -EAFNOSUPPORT;
break;
}
priv->family = family;
if (tb[NFTA_NAT_REG_ADDR_MIN]) {
err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MIN],
&priv->sreg_addr_min, alen);
if (err < 0)
return err;
if (tb[NFTA_NAT_REG_ADDR_MAX]) {
err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MAX],
&priv->sreg_addr_max,
alen);
if (err < 0)
return err;
} else {
priv->sreg_addr_max = priv->sreg_addr_min;
}
priv->flags |= NF_NAT_RANGE_MAP_IPS;
}
plen = sizeof_field(struct nf_nat_range, min_proto.all);
if (tb[NFTA_NAT_REG_PROTO_MIN]) {
err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
&priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_NAT_REG_PROTO_MAX]) {
err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MAX],
&priv->sreg_proto_max,
plen);
if (err < 0)
return err;
} else {
priv->sreg_proto_max = priv->sreg_proto_min;
}
priv->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
if (tb[NFTA_NAT_FLAGS])
priv->flags |= ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS]));
return nf_ct_netns_get(ctx->net, family);
}
static int nft_nat_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_nat *priv = nft_expr_priv(expr);
switch (priv->type) {
case NF_NAT_MANIP_SRC:
if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_SNAT)))
goto nla_put_failure;
break;
case NF_NAT_MANIP_DST:
if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_DNAT)))
goto nla_put_failure;
break;
}
if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family)))
goto nla_put_failure;
if (priv->sreg_addr_min) {
if (nft_dump_register(skb, NFTA_NAT_REG_ADDR_MIN,
priv->sreg_addr_min) ||
nft_dump_register(skb, NFTA_NAT_REG_ADDR_MAX,
priv->sreg_addr_max))
goto nla_put_failure;
}
if (priv->sreg_proto_min) {
if (nft_dump_register(skb, NFTA_NAT_REG_PROTO_MIN,
priv->sreg_proto_min) ||
nft_dump_register(skb, NFTA_NAT_REG_PROTO_MAX,
priv->sreg_proto_max))
goto nla_put_failure;
}
if (priv->flags != 0) {
if (nla_put_be32(skb, NFTA_NAT_FLAGS, htonl(priv->flags)))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -1;
}
static void
nft_nat_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
const struct nft_nat *priv = nft_expr_priv(expr);
nf_ct_netns_put(ctx->net, priv->family);
}
static struct nft_expr_type nft_nat_type;
static const struct nft_expr_ops nft_nat_ops = {
.type = &nft_nat_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_nat)),
.eval = nft_nat_eval,
.init = nft_nat_init,
.destroy = nft_nat_destroy,
.dump = nft_nat_dump,
.validate = nft_nat_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_nat_type __read_mostly = {
.name = "nat",
.ops = &nft_nat_ops,
.policy = nft_nat_policy,
.maxattr = NFTA_NAT_MAX,
.owner = THIS_MODULE,
};
#ifdef CONFIG_NF_TABLES_INET
static void nft_nat_inet_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_nat *priv = nft_expr_priv(expr);
if (priv->family == nft_pf(pkt) ||
priv->family == NFPROTO_INET)
nft_nat_eval(expr, regs, pkt);
}
static const struct nft_expr_ops nft_nat_inet_ops = {
.type = &nft_nat_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_nat)),
.eval = nft_nat_inet_eval,
.init = nft_nat_init,
.destroy = nft_nat_destroy,
.dump = nft_nat_dump,
.validate = nft_nat_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_inet_nat_type __read_mostly = {
.name = "nat",
.family = NFPROTO_INET,
.ops = &nft_nat_inet_ops,
.policy = nft_nat_policy,
.maxattr = NFTA_NAT_MAX,
.owner = THIS_MODULE,
};
static int nft_nat_inet_module_init(void)
{
return nft_register_expr(&nft_inet_nat_type);
}
static void nft_nat_inet_module_exit(void)
{
nft_unregister_expr(&nft_inet_nat_type);
}
#else
static int nft_nat_inet_module_init(void) { return 0; }
static void nft_nat_inet_module_exit(void) { }
#endif
static int __init nft_nat_module_init(void)
{
int ret = nft_nat_inet_module_init();
if (ret)
return ret;
ret = nft_register_expr(&nft_nat_type);
if (ret)
nft_nat_inet_module_exit();
return ret;
}
static void __exit nft_nat_module_exit(void)
{
nft_nat_inet_module_exit();
nft_unregister_expr(&nft_nat_type);
}
module_init(nft_nat_module_init);
module_exit(nft_nat_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Bursztyka <[email protected]>");
MODULE_ALIAS_NFT_EXPR("nat");
MODULE_DESCRIPTION("Network Address Translation support");
| linux-master | net/netfilter/nft_nat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_meta.h>
#include <net/netfilter/nf_tables_offload.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/gre.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
static DEFINE_PER_CPU(struct nft_inner_tun_ctx, nft_pcpu_tun_ctx);
/* Same layout as nft_expr but it embeds the private expression data area. */
struct __nft_expr {
const struct nft_expr_ops *ops;
union {
struct nft_payload payload;
struct nft_meta meta;
} __attribute__((aligned(__alignof__(u64))));
};
enum {
NFT_INNER_EXPR_PAYLOAD,
NFT_INNER_EXPR_META,
};
struct nft_inner {
u8 flags;
u8 hdrsize;
u8 type;
u8 expr_type;
struct __nft_expr expr;
};
static int nft_inner_parse_l2l3(const struct nft_inner *priv,
const struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *ctx, u32 off)
{
__be16 llproto, outer_llproto;
u32 nhoff, thoff;
if (priv->flags & NFT_INNER_LL) {
struct vlan_ethhdr *veth, _veth;
struct ethhdr *eth, _eth;
u32 hdrsize;
eth = skb_header_pointer(pkt->skb, off, sizeof(_eth), &_eth);
if (!eth)
return -1;
switch (eth->h_proto) {
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
llproto = eth->h_proto;
hdrsize = sizeof(_eth);
break;
case htons(ETH_P_8021Q):
veth = skb_header_pointer(pkt->skb, off, sizeof(_veth), &_veth);
if (!veth)
return -1;
outer_llproto = veth->h_vlan_encapsulated_proto;
llproto = veth->h_vlan_proto;
hdrsize = sizeof(_veth);
break;
default:
return -1;
}
ctx->inner_lloff = off;
ctx->flags |= NFT_PAYLOAD_CTX_INNER_LL;
off += hdrsize;
} else {
struct iphdr *iph;
u32 _version;
iph = skb_header_pointer(pkt->skb, off, sizeof(_version), &_version);
if (!iph)
return -1;
switch (iph->version) {
case 4:
llproto = htons(ETH_P_IP);
break;
case 6:
llproto = htons(ETH_P_IPV6);
break;
default:
return -1;
}
}
ctx->llproto = llproto;
if (llproto == htons(ETH_P_8021Q))
llproto = outer_llproto;
nhoff = off;
switch (llproto) {
case htons(ETH_P_IP): {
struct iphdr *iph, _iph;
iph = skb_header_pointer(pkt->skb, nhoff, sizeof(_iph), &_iph);
if (!iph)
return -1;
if (iph->ihl < 5 || iph->version != 4)
return -1;
ctx->inner_nhoff = nhoff;
ctx->flags |= NFT_PAYLOAD_CTX_INNER_NH;
thoff = nhoff + (iph->ihl * 4);
if ((ntohs(iph->frag_off) & IP_OFFSET) == 0) {
ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
ctx->inner_thoff = thoff;
ctx->l4proto = iph->protocol;
}
}
break;
case htons(ETH_P_IPV6): {
struct ipv6hdr *ip6h, _ip6h;
int fh_flags = IP6_FH_F_AUTH;
unsigned short fragoff;
int l4proto;
ip6h = skb_header_pointer(pkt->skb, nhoff, sizeof(_ip6h), &_ip6h);
if (!ip6h)
return -1;
if (ip6h->version != 6)
return -1;
ctx->inner_nhoff = nhoff;
ctx->flags |= NFT_PAYLOAD_CTX_INNER_NH;
thoff = nhoff;
l4proto = ipv6_find_hdr(pkt->skb, &thoff, -1, &fragoff, &fh_flags);
if (l4proto < 0 || thoff > U16_MAX)
return -1;
if (fragoff == 0) {
thoff = nhoff + sizeof(_ip6h);
ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
ctx->inner_thoff = thoff;
ctx->l4proto = l4proto;
}
}
break;
default:
return -1;
}
return 0;
}
static int nft_inner_parse_tunhdr(const struct nft_inner *priv,
const struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *ctx, u32 *off)
{
if (pkt->tprot == IPPROTO_GRE) {
ctx->inner_tunoff = pkt->thoff;
ctx->flags |= NFT_PAYLOAD_CTX_INNER_TUN;
return 0;
}
if (pkt->tprot != IPPROTO_UDP)
return -1;
ctx->inner_tunoff = *off;
ctx->flags |= NFT_PAYLOAD_CTX_INNER_TUN;
*off += priv->hdrsize;
switch (priv->type) {
case NFT_INNER_GENEVE: {
struct genevehdr *gnvh, _gnvh;
gnvh = skb_header_pointer(pkt->skb, pkt->inneroff,
sizeof(_gnvh), &_gnvh);
if (!gnvh)
return -1;
*off += gnvh->opt_len * 4;
}
break;
default:
break;
}
return 0;
}
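/* The geneve option length consumed above is expressed in 4-byte units
 * (struct genevehdr::opt_len, per RFC 8926), hence the "* 4" when skipping
 * the variable-length option area to reach the inner frame.
 */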
static int nft_inner_parse(const struct nft_inner *priv,
struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *tun_ctx)
{
struct nft_inner_tun_ctx ctx = {};
u32 off = pkt->inneroff;
if (priv->flags & NFT_INNER_HDRSIZE &&
nft_inner_parse_tunhdr(priv, pkt, &ctx, &off) < 0)
return -1;
if (priv->flags & (NFT_INNER_LL | NFT_INNER_NH)) {
if (nft_inner_parse_l2l3(priv, pkt, &ctx, off) < 0)
return -1;
} else if (priv->flags & NFT_INNER_TH) {
ctx.inner_thoff = off;
ctx.flags |= NFT_PAYLOAD_CTX_INNER_TH;
}
*tun_ctx = ctx;
tun_ctx->type = priv->type;
pkt->flags |= NFT_PKTINFO_INNER_FULL;
return 0;
}
static bool nft_inner_parse_needed(const struct nft_inner *priv,
const struct nft_pktinfo *pkt,
const struct nft_inner_tun_ctx *tun_ctx)
{
if (!(pkt->flags & NFT_PKTINFO_INNER_FULL))
return true;
if (priv->type != tun_ctx->type)
return true;
return false;
}
static void nft_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_inner_tun_ctx *tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
const struct nft_inner *priv = nft_expr_priv(expr);
if (nft_payload_inner_offset(pkt) < 0)
goto err;
if (nft_inner_parse_needed(priv, pkt, tun_ctx) &&
nft_inner_parse(priv, (struct nft_pktinfo *)pkt, tun_ctx) < 0)
goto err;
switch (priv->expr_type) {
case NFT_INNER_EXPR_PAYLOAD:
nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
break;
case NFT_INNER_EXPR_META:
nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
break;
default:
WARN_ON_ONCE(1);
goto err;
}
return;
err:
regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_inner_policy[NFTA_INNER_MAX + 1] = {
[NFTA_INNER_NUM] = { .type = NLA_U32 },
[NFTA_INNER_FLAGS] = { .type = NLA_U32 },
[NFTA_INNER_HDRSIZE] = { .type = NLA_U32 },
[NFTA_INNER_TYPE] = { .type = NLA_U32 },
[NFTA_INNER_EXPR] = { .type = NLA_NESTED },
};
struct nft_expr_info {
const struct nft_expr_ops *ops;
const struct nlattr *attr;
struct nlattr *tb[NFT_EXPR_MAXATTR + 1];
};
static int nft_inner_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_inner *priv = nft_expr_priv(expr);
u32 flags, hdrsize, type, num;
struct nft_expr_info expr_info;
int err;
if (!tb[NFTA_INNER_FLAGS] ||
!tb[NFTA_INNER_HDRSIZE] ||
!tb[NFTA_INNER_TYPE] ||
!tb[NFTA_INNER_EXPR])
return -EINVAL;
flags = ntohl(nla_get_be32(tb[NFTA_INNER_FLAGS]));
if (flags & ~NFT_INNER_MASK)
return -EOPNOTSUPP;
num = ntohl(nla_get_be32(tb[NFTA_INNER_NUM]));
if (num != 0)
return -EOPNOTSUPP;
hdrsize = ntohl(nla_get_be32(tb[NFTA_INNER_HDRSIZE]));
type = ntohl(nla_get_be32(tb[NFTA_INNER_TYPE]));
if (type > U8_MAX)
return -EINVAL;
if (flags & NFT_INNER_HDRSIZE) {
if (hdrsize == 0 || hdrsize > 64)
return -EOPNOTSUPP;
}
priv->flags = flags;
priv->hdrsize = hdrsize;
priv->type = type;
err = nft_expr_inner_parse(ctx, tb[NFTA_INNER_EXPR], &expr_info);
if (err < 0)
return err;
priv->expr.ops = expr_info.ops;
if (!strcmp(expr_info.ops->type->name, "payload"))
priv->expr_type = NFT_INNER_EXPR_PAYLOAD;
else if (!strcmp(expr_info.ops->type->name, "meta"))
priv->expr_type = NFT_INNER_EXPR_META;
else
return -EINVAL;
err = expr_info.ops->init(ctx, (struct nft_expr *)&priv->expr,
(const struct nlattr * const*)expr_info.tb);
if (err < 0)
return err;
return 0;
}
static int nft_inner_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_inner *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_INNER_NUM, htonl(0)) ||
nla_put_be32(skb, NFTA_INNER_TYPE, htonl(priv->type)) ||
nla_put_be32(skb, NFTA_INNER_FLAGS, htonl(priv->flags)) ||
nla_put_be32(skb, NFTA_INNER_HDRSIZE, htonl(priv->hdrsize)))
goto nla_put_failure;
if (nft_expr_dump(skb, NFTA_INNER_EXPR,
(struct nft_expr *)&priv->expr, reset) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static const struct nft_expr_ops nft_inner_ops = {
.type = &nft_inner_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_inner)),
.eval = nft_inner_eval,
.init = nft_inner_init,
.dump = nft_inner_dump,
};
struct nft_expr_type nft_inner_type __read_mostly = {
.name = "inner",
.ops = &nft_inner_ops,
.policy = nft_inner_policy,
.maxattr = NFTA_INNER_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_inner.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
* (C) 2002-2013 Jozsef Kadlecsik <[email protected]>
* (C) 2006-2012 Patrick McHardy <[email protected]>
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
closely. They're more complex. --RR */
static const char *const tcp_conntrack_names[] = {
"NONE",
"SYN_SENT",
"SYN_RECV",
"ESTABLISHED",
"FIN_WAIT",
"CLOSE_WAIT",
"LAST_ACK",
"TIME_WAIT",
"CLOSE",
"SYN_SENT2",
};
enum nf_ct_tcp_action {
NFCT_TCP_IGNORE,
NFCT_TCP_INVALID,
NFCT_TCP_ACCEPT,
};
#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
[TCP_CONNTRACK_SYN_SENT] = 2 MINS,
[TCP_CONNTRACK_SYN_RECV] = 60 SECS,
[TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
[TCP_CONNTRACK_FIN_WAIT] = 2 MINS,
[TCP_CONNTRACK_CLOSE_WAIT] = 60 SECS,
[TCP_CONNTRACK_LAST_ACK] = 30 SECS,
[TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
[TCP_CONNTRACK_CLOSE] = 10 SECS,
[TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
/* RFC1122 says the R2 limit should be at least 100 seconds.
Linux uses 15 packets as limit, which corresponds
to ~13-30min depending on RTO. */
[TCP_CONNTRACK_RETRANS] = 5 MINS,
[TCP_CONNTRACK_UNACK] = 5 MINS,
};
#define sNO TCP_CONNTRACK_NONE
#define sSS TCP_CONNTRACK_SYN_SENT
#define sSR TCP_CONNTRACK_SYN_RECV
#define sES TCP_CONNTRACK_ESTABLISHED
#define sFW TCP_CONNTRACK_FIN_WAIT
#define sCW TCP_CONNTRACK_CLOSE_WAIT
#define sLA TCP_CONNTRACK_LAST_ACK
#define sTW TCP_CONNTRACK_TIME_WAIT
#define sCL TCP_CONNTRACK_CLOSE
#define sS2 TCP_CONNTRACK_SYN_SENT2
#define sIV TCP_CONNTRACK_MAX
#define sIG TCP_CONNTRACK_IGNORE
/* What TCP flags are set from RST/SYN/FIN/ACK. */
enum tcp_bit_set {
TCP_SYN_SET,
TCP_SYNACK_SET,
TCP_FIN_SET,
TCP_ACK_SET,
TCP_RST_SET,
TCP_NONE_SET,
};
/*
* The TCP state transition table needs a few words...
*
* We are the man in the middle. All the packets go through us
* but might get lost in transit to the destination.
* It is assumed that the destinations can't receive segments
* we haven't seen.
*
* The checked segment is in window, but our windows are *not*
* equivalent with the ones of the sender/receiver. We always
* try to guess the state of the current sender.
*
* The meaning of the states are:
*
* NONE: initial state
* SYN_SENT: SYN-only packet seen
* SYN_SENT2: SYN-only packet seen from reply dir, simultaneous open
* SYN_RECV: SYN-ACK packet seen
* ESTABLISHED: ACK packet seen
* FIN_WAIT: FIN packet seen
* CLOSE_WAIT: ACK seen (after FIN)
* LAST_ACK: FIN seen (after FIN)
* TIME_WAIT: last ACK seen
* CLOSE: closed connection (RST)
*
* Packets marked as IGNORED (sIG):
* if they may be either invalid or valid
* and the receiver may send back a connection
* closing RST or a SYN/ACK.
*
* Packets marked as INVALID (sIV):
* if we regard them as truly invalid packets
*/
static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
{
/* ORIGINAL */
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
/*
* sNO -> sSS Initialize a new connection
* sSS -> sSS Retransmitted SYN
* sS2 -> sS2 Late retransmitted SYN
* sSR -> sIG
* sES -> sIG Error: SYNs in window outside the SYN_SENT state
* are errors. Receiver will reply with RST
* and close the connection.
* Or we are not in sync and hold a dead connection.
* sFW -> sIG
* sCW -> sIG
* sLA -> sIG
* sTW -> sSS Reopened connection (RFC 1122).
* sCL -> sSS
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
/*
* sNO -> sIV Too late and no reason to do anything
* sSS -> sIV Client can't send SYN and then SYN/ACK
* sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
* sSR -> sSR Late retransmitted SYN/ACK in simultaneous open
* sES -> sIV Invalid SYN/ACK packets sent by the client
* sFW -> sIV
* sCW -> sIV
* sLA -> sIV
* sTW -> sIV
* sCL -> sIV
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
* sNO -> sIV Too late and no reason to do anything...
* sSS -> sIV Client might not send FIN in this state:
* we enforce waiting for a SYN/ACK reply first.
* sS2 -> sIV
* sSR -> sFW Close started.
* sES -> sFW
* sFW -> sLA FIN seen in both directions, waiting for
* the last ACK.
* Might be a retransmitted FIN as well...
* sCW -> sLA
* sLA -> sLA Retransmitted FIN. Remain in the same state.
* sTW -> sTW
* sCL -> sCL
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
/*
* sNO -> sES Assumed.
* sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet.
* sS2 -> sIV
* sSR -> sES Established state is reached.
* sES -> sES :-)
* sFW -> sCW Normal close request answered by ACK.
* sCW -> sCW
* sLA -> sTW Last ACK detected (RFC5961 challenged)
* sTW -> sTW Retransmitted last ACK. Remain in the same state.
* sCL -> sCL
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
},
{
/* REPLY */
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
/*
* sNO -> sIV Never reached.
* sSS -> sS2 Simultaneous open
* sS2 -> sS2 Retransmitted simultaneous SYN
* sSR -> sIV Invalid SYN packets sent by the server
* sES -> sIV
* sFW -> sIV
* sCW -> sIV
* sLA -> sIV
* sTW -> sSS Reopened connection, but server may have switched role
* sCL -> sIV
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*
* sSS -> sSR Standard open.
* sS2 -> sSR Simultaneous open
* sSR -> sIG Retransmitted SYN/ACK, ignore it.
* sES -> sIG Late retransmitted SYN/ACK?
* sFW -> sIG Might be SYN/ACK answering ignored SYN
* sCW -> sIG
* sLA -> sIG
* sTW -> sIG
* sCL -> sIG
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
* sSS -> sIV Server might not send FIN in this state.
* sS2 -> sIV
* sSR -> sFW Close started.
* sES -> sFW
* sFW -> sLA FIN seen in both directions.
* sCW -> sLA
* sLA -> sLA Retransmitted FIN.
* sTW -> sTW
* sCL -> sCL
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*ack*/ { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
/*
* sSS -> sIG Might be a half-open connection.
* sS2 -> sIG
* sSR -> sSR Might answer late resent SYN.
* sES -> sES :-)
* sFW -> sCW Normal close request answered by ACK.
* sCW -> sCW
* sLA -> sTW Last ACK detected (RFC5961 challenged)
* sTW -> sTW Retransmitted last ACK.
* sCL -> sCL
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
}
};
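/* Illustrative lookup: a SYN in the ORIGINAL direction on a fresh entry,
 * tcp_conntracks[IP_CT_DIR_ORIGINAL][TCP_SYN_SET][TCP_CONNTRACK_NONE],
 * yields TCP_CONNTRACK_SYN_SENT (sNO -> sSS in the table above).
 */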
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
return;
seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
}
#endif
static unsigned int get_conntrack_index(const struct tcphdr *tcph)
{
if (tcph->rst) return TCP_RST_SET;
else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
else if (tcph->fin) return TCP_FIN_SET;
else if (tcph->ack) return TCP_ACK_SET;
else return TCP_NONE_SET;
}
/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
in IP Filter' by Guido van Rooij.
http://www.sane.nl/events/sane2000/papers.html
http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
The boundaries and the conditions are changed according to RFC793:
the packet must intersect the window (i.e. segments may be
after the right or before the left edge) and thus receivers may ACK
segments after the right edge of the window.
td_maxend = max(sack + max(win,1)) seen in reply packets
td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
td_maxwin += seq + len - sender.td_maxend
if seq + len > sender.td_maxend
td_end = max(seq + len) seen in sent packets
I. Upper bound for valid data: seq <= sender.td_maxend
II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
III. Upper bound for valid (s)ack: sack <= receiver.td_end
IV. Lower bound for valid (s)ack: sack >= receiver.td_end - MAXACKWINDOW
where sack is the highest right edge of sack block found in the packet
or ack in the case of packet without SACK option.
The upper bound limit for a valid (s)ack is not ignored -
we don't have to deal with fragments.
*/
static inline __u32 segment_seq_plus_len(__u32 seq,
size_t len,
unsigned int dataoff,
const struct tcphdr *tcph)
{
/* XXX Should I use payload length field in IP/IPv6 header ?
* - YK */
return (seq + len - dataoff - tcph->doff*4
+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
}
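/* Example: for a bare SYN (no payload, no FIN), skb->len - dataoff equals
 * tcph->doff * 4, so this reduces to seq + 1: the SYN consumes one
 * sequence number.
 */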
/* Fixme: what about big packets? */
#define MAXACKWINCONST 66000
#define MAXACKWINDOW(sender) \
((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \
: MAXACKWINCONST)
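/* e.g. a sender whose td_maxwin is 4096 still gets the 66000 floor, while
 * one tracking td_maxwin = 131072 is bounded by its own window instead.
 */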
/*
* Simplified tcp_parse_options routine from tcp_input.c
*/
static void tcp_options(const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
struct ip_ct_tcp_state *state)
{
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
const unsigned char *ptr;
int length = (tcph->doff*4) - sizeof(struct tcphdr);
if (!length)
return;
ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
length, buff);
if (!ptr)
return;
state->td_scale = 0;
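/* Keep only the sticky BE_LIBERAL bit; all option-derived flags are
 * re-learned from the options parsed below.
 */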
state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
while (length > 0) {
int opcode = *ptr++;
int opsize;
switch (opcode) {
case TCPOPT_EOL:
return;
case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
length--;
continue;
default:
if (length < 2)
return;
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
if (opsize > length)
return; /* don't parse partial options */
if (opcode == TCPOPT_SACK_PERM
&& opsize == TCPOLEN_SACK_PERM)
state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
else if (opcode == TCPOPT_WINDOW
&& opsize == TCPOLEN_WINDOW) {
state->td_scale = *(u_int8_t *)ptr;
if (state->td_scale > TCP_MAX_WSCALE)
state->td_scale = TCP_MAX_WSCALE;
state->flags |=
IP_CT_TCP_FLAG_WINDOW_SCALE;
}
ptr += opsize - 2;
length -= opsize;
}
}
}
static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
const struct tcphdr *tcph, __u32 *sack)
{
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
const unsigned char *ptr;
int length = (tcph->doff*4) - sizeof(struct tcphdr);
__u32 tmp;
if (!length)
return;
ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
length, buff);
if (!ptr)
return;
/* Fast path for timestamp-only option */
if (length == TCPOLEN_TSTAMP_ALIGNED
&& *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
| (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8)
| TCPOLEN_TIMESTAMP))
return;
while (length > 0) {
int opcode = *ptr++;
int opsize, i;
switch (opcode) {
case TCPOPT_EOL:
return;
case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
length--;
continue;
default:
if (length < 2)
return;
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
if (opsize > length)
return; /* don't parse partial options */
if (opcode == TCPOPT_SACK
&& opsize >= (TCPOLEN_SACK_BASE
+ TCPOLEN_SACK_PERBLOCK)
&& !((opsize - TCPOLEN_SACK_BASE)
% TCPOLEN_SACK_PERBLOCK)) {
for (i = 0;
i < (opsize - TCPOLEN_SACK_BASE);
i += TCPOLEN_SACK_PERBLOCK) {
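/* Each SACK block carries two 32-bit edges; the +1 skips the
 * left edge so only the right (highest) edge is read.
 */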
tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
if (after(tmp, *sack))
*sack = tmp;
}
return;
}
ptr += opsize - 2;
length -= opsize;
}
}
}
static void tcp_init_sender(struct ip_ct_tcp_state *sender,
struct ip_ct_tcp_state *receiver,
const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
u32 end, u32 win)
{
/* SYN-ACK in reply to a SYN
* or SYN from reply direction in simultaneous open.
*/
sender->td_end =
sender->td_maxend = end;
sender->td_maxwin = (win == 0 ? 1 : win);
tcp_options(skb, dataoff, tcph, sender);
/* RFC 1323:
* Both sides must send the Window Scale option
* to enable window scaling in either direction.
*/
if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
sender->td_scale = 0;
receiver->td_scale = 0;
}
}
__printf(6, 7)
static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
const struct nf_hook_state *state,
const struct ip_ct_tcp_state *sender,
enum nf_ct_tcp_action ret,
const char *fmt, ...)
{
const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
struct va_format vaf;
va_list args;
bool be_liberal;
be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal;
if (be_liberal)
return NFCT_TCP_ACCEPT;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);
va_end(args);
return ret;
}
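/* Validate one segment against the tracked windows of both directions
 * (checks I-IV in the comment above segment_seq_plus_len()) and update
 * the per-direction state when the segment is acceptable.
 */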
static enum nf_ct_tcp_action
tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
unsigned int index, const struct sk_buff *skb,
unsigned int dataoff, const struct tcphdr *tcph,
const struct nf_hook_state *hook_state)
{
struct ip_ct_tcp *state = &ct->proto.tcp;
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
__u32 seq, ack, sack, end, win, swin;
bool in_recv_win, seq_ok;
s32 receiver_offset;
u16 win_raw;
/*
* Get the required data from the packet.
*/
seq = ntohl(tcph->seq);
ack = sack = ntohl(tcph->ack_seq);
win_raw = ntohs(tcph->window);
win = win_raw;
end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
tcp_sack(skb, dataoff, tcph, &sack);
/* Take into account NAT sequence number mangling */
receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
ack -= receiver_offset;
sack -= receiver_offset;
if (sender->td_maxwin == 0) {
/*
* Initialize sender data.
*/
if (tcph->syn) {
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
end, win);
if (!tcph->ack)
/* Simultaneous open */
return NFCT_TCP_ACCEPT;
} else {
/*
* We are in the middle of a connection,
* its history is lost for us.
* Let's try to use the data from the packet.
*/
sender->td_end = end;
swin = win << sender->td_scale;
sender->td_maxwin = (swin == 0 ? 1 : swin);
sender->td_maxend = end + sender->td_maxwin;
if (receiver->td_maxwin == 0) {
/* We haven't seen traffic in the other
* direction yet but we have to tweak window
* tracking to pass III and IV until that
* happens.
*/
receiver->td_end = receiver->td_maxend = sack;
} else if (sack == receiver->td_end + 1) {
/* Likely a reply to a keepalive.
* Needed for III.
*/
receiver->td_end++;
}
}
} else if (tcph->syn &&
after(end, sender->td_end) &&
(state->state == TCP_CONNTRACK_SYN_SENT ||
state->state == TCP_CONNTRACK_SYN_RECV)) {
/*
* RFC 793: "if a TCP is reinitialized ... then it need
* not wait at all; it must only be sure to use sequence
* numbers larger than those recently used."
*
* Re-init state for this direction, just like for the first
* syn(-ack) reply, it might differ in seq, ack or tcp options.
*/
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
end, win);
if (dir == IP_CT_DIR_REPLY && !tcph->ack)
return NFCT_TCP_ACCEPT;
}
if (!(tcph->ack)) {
/*
* If there is no ACK, just pretend it was set and OK.
*/
ack = sack = receiver->td_end;
} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
(TCP_FLAG_ACK|TCP_FLAG_RST))
&& (ack == 0)) {
/*
* Broken TCP stacks that set ACK in RST packets as well,
* with a zero ack value.
*/
ack = sack = receiver->td_end;
}
if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
/*
* RST sent answering SYN.
*/
seq = end = sender->td_end;
seq_ok = before(seq, sender->td_maxend + 1);
if (!seq_ok) {
u32 overshot = end - sender->td_maxend + 1;
bool ack_ok;
ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
in_recv_win = receiver->td_maxwin &&
after(end, sender->td_end - receiver->td_maxwin - 1);
if (in_recv_win &&
ack_ok &&
overshot <= receiver->td_maxwin &&
before(sack, receiver->td_end + 1)) {
/* Work around TCPs that send more bytes than allowed by
* the receive window.
*
* If the (marked as invalid) packet is allowed to pass by
* the ruleset and the peer acks this data, then it's possible
* all future packets will trigger 'ACK is over upper bound' check.
*
* Thus, if only the sequence check fails, update td_end so that a
* possible ACK for this data can update internal state.
*/
sender->td_end = end;
sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"%u bytes more than expected", overshot);
}
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
"SEQ is over upper bound %u (over the window of the receiver)",
sender->td_maxend + 1);
}
if (!before(sack, receiver->td_end + 1))
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
"ACK is over upper bound %u (ACKed data not seen yet)",
receiver->td_end + 1);
/* Is the ending sequence in the receive window (if available)? */
in_recv_win = !receiver->td_maxwin ||
after(end, sender->td_end - receiver->td_maxwin - 1);
if (!in_recv_win)
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"SEQ is under lower bound %u (already ACKed data retransmitted)",
sender->td_end - receiver->td_maxwin - 1);
if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1))
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"ignored ACK under lower bound %u (possible overly delayed)",
receiver->td_end - MAXACKWINDOW(sender) - 1);
/* Take into account window scaling (RFC 1323). */
if (!tcph->syn)
win <<= sender->td_scale;
/* Update sender data. */
swin = win + (sack - ack);
if (sender->td_maxwin < swin)
sender->td_maxwin = swin;
if (after(end, sender->td_end)) {
sender->td_end = end;
sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
}
if (tcph->ack) {
if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
sender->td_maxack = ack;
sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
} else if (after(ack, sender->td_maxack)) {
sender->td_maxack = ack;
}
}
/* Update receiver data. */
if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
receiver->td_maxwin += end - sender->td_maxend;
if (after(sack + win, receiver->td_maxend - 1)) {
receiver->td_maxend = sack + win;
if (win == 0)
receiver->td_maxend++;
}
if (ack == receiver->td_end)
receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
/* Check retransmissions. */
if (index == TCP_ACK_SET) {
if (state->last_dir == dir &&
state->last_seq == seq &&
state->last_ack == ack &&
state->last_end == end &&
state->last_win == win_raw) {
state->retrans++;
} else {
state->last_dir = dir;
state->last_seq = seq;
state->last_ack = ack;
state->last_end = end;
state->last_win = win_raw;
state->retrans = 0;
}
}
return NFCT_TCP_ACCEPT;
}
static void __cold nf_tcp_handle_invalid(struct nf_conn *ct,
enum ip_conntrack_dir dir,
int index,
const struct sk_buff *skb,
const struct nf_hook_state *hook_state)
{
const unsigned int *timeouts;
const struct nf_tcp_net *tn;
unsigned int timeout;
u32 expires;
if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
return;
/* We don't want to have connections hanging around in ESTABLISHED
* state for a long time 'just because' conntrack deemed a FIN/RST
* out-of-window.
*
* Shrink the timeout just like when there is unacked data.
* This speeds up eviction of 'dead' connections where the
* connection and conntrack's internal state are out of sync.
*/
switch (index) {
case TCP_RST_SET:
case TCP_FIN_SET:
break;
default:
return;
}
if (ct->proto.tcp.last_dir != dir &&
(ct->proto.tcp.last_index == TCP_FIN_SET ||
ct->proto.tcp.last_index == TCP_RST_SET)) {
expires = nf_ct_expires(ct);
if (expires < 120 * HZ)
return;
tn = nf_tcp_pernet(nf_ct_net(ct));
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = tn->timeouts;
timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]);
if (expires > timeout) {
nf_ct_l4proto_log_invalid(skb, ct, hook_state,
"packet (index %d, dir %d) response for index %d lower timeout to %u",
index, dir, ct->proto.tcp.last_index, timeout);
WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
}
} else {
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
}
}
/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
TCPHDR_URG) + 1] =
{
[TCPHDR_SYN] = 1,
[TCPHDR_SYN|TCPHDR_URG] = 1,
[TCPHDR_SYN|TCPHDR_ACK] = 1,
[TCPHDR_RST] = 1,
[TCPHDR_RST|TCPHDR_ACK] = 1,
[TCPHDR_FIN|TCPHDR_ACK] = 1,
[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1,
[TCPHDR_ACK] = 1,
[TCPHDR_ACK|TCPHDR_URG] = 1,
};
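/* Example: a lone FIN (no ACK) or a SYN|FIN combination indexes a zero
 * entry above, so tcp_error() rejects it as an invalid flag combination.
 */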
static void tcp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
}
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
static bool tcp_error(const struct tcphdr *th,
struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state)
{
unsigned int tcplen = skb->len - dataoff;
u8 tcpflags;
/* Not whole TCP header or malformed packet */
if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
tcp_error_log(skb, state, "truncated packet");
return true;
}
/* Checksum invalid? Ignore.
* We skip checking packets on the outgoing path
* because the checksum is assumed to be correct.
*/
/* FIXME: Source route IP option packets --RR */
if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING &&
nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
tcp_error_log(skb, state, "bad checksum");
return true;
}
/* Check TCP flags. */
tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
if (!tcp_valid_flags[tcpflags]) {
tcp_error_log(skb, state, "invalid tcp flag combination");
return true;
}
return false;
}
static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *th)
{
enum tcp_conntrack new_state;
struct net *net = nf_ct_net(ct);
const struct nf_tcp_net *tn = nf_tcp_pernet(net);
/* Don't need lock here: this conntrack is not in circulation yet */
new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
/* Invalid: delete conntrack */
if (new_state >= TCP_CONNTRACK_MAX) {
pr_debug("nf_ct_tcp: invalid new deleting.\n");
return false;
}
if (new_state == TCP_CONNTRACK_SYN_SENT) {
memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/* SYN packet */
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
dataoff, th);
ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
if (ct->proto.tcp.seen[0].td_maxwin == 0)
ct->proto.tcp.seen[0].td_maxwin = 1;
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end;
tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
} else if (tn->tcp_loose == 0) {
/* Don't try to pick up connections. */
return false;
} else {
memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/*
* We are in the middle of a connection,
* its history is lost for us.
* Let's try to use the data from the packet.
*/
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
dataoff, th);
ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
if (ct->proto.tcp.seen[0].td_maxwin == 0)
ct->proto.tcp.seen[0].td_maxwin = 1;
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end +
ct->proto.tcp.seen[0].td_maxwin;
/* We assume SACK and liberal window checking to handle
* window scaling */
ct->proto.tcp.seen[0].flags =
ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
IP_CT_TCP_FLAG_BE_LIBERAL;
}
/* tcp_packet will set them */
ct->proto.tcp.last_index = TCP_NONE_SET;
return true;
}
static bool tcp_can_early_drop(const struct nf_conn *ct)
{
switch (ct->proto.tcp.state) {
case TCP_CONNTRACK_FIN_WAIT:
case TCP_CONNTRACK_LAST_ACK:
case TCP_CONNTRACK_TIME_WAIT:
case TCP_CONNTRACK_CLOSE:
case TCP_CONNTRACK_CLOSE_WAIT:
return true;
default:
break;
}
return false;
}
void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
{
enum tcp_conntrack old_state;
const unsigned int *timeouts;
u32 timeout;
if (!nf_ct_is_confirmed(ct))
return;
spin_lock_bh(&ct->lock);
old_state = ct->proto.tcp.state;
ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
if (old_state == TCP_CONNTRACK_CLOSE ||
test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
spin_unlock_bh(&ct->lock);
return;
}
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts) {
const struct nf_tcp_net *tn;
tn = nf_tcp_pernet(nf_ct_net(ct));
timeouts = tn->timeouts;
}
timeout = timeouts[TCP_CONNTRACK_CLOSE];
WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
spin_unlock_bh(&ct->lock);
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
}
static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
{
state->td_end = 0;
state->td_maxend = 0;
state->td_maxwin = 0;
state->td_maxack = 0;
state->td_scale = 0;
state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
}
/* Returns verdict for packet, or -1 for invalid. */
int nf_conntrack_tcp_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
enum nf_ct_tcp_action res;
enum ip_conntrack_dir dir;
const struct tcphdr *th;
struct tcphdr _tcph;
unsigned long timeout;
th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return -NF_ACCEPT;
if (tcp_error(th, skb, dataoff, state))
return -NF_ACCEPT;
if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
return -NF_ACCEPT;
spin_lock_bh(&ct->lock);
old_state = ct->proto.tcp.state;
dir = CTINFO2DIR(ctinfo);
index = get_conntrack_index(th);
new_state = tcp_conntracks[dir][index][old_state];
switch (new_state) {
case TCP_CONNTRACK_SYN_SENT:
if (old_state < TCP_CONNTRACK_TIME_WAIT)
break;
/* RFC 1122: "When a connection is closed actively,
* it MUST linger in TIME-WAIT state for a time 2xMSL
* (Maximum Segment Lifetime). However, it MAY accept
* a new SYN from the remote TCP to reopen the connection
* directly from TIME-WAIT state, if..."
* We ignore the conditions because we are in the
* TIME-WAIT state anyway.
*
* Handle aborted connections: we and the server
* think there is an existing connection but the client
* aborts it and starts a new one.
*/
if (((ct->proto.tcp.seen[dir].flags
| ct->proto.tcp.seen[!dir].flags)
& IP_CT_TCP_FLAG_CLOSE_INIT)
|| (ct->proto.tcp.last_dir == dir
&& ct->proto.tcp.last_index == TCP_RST_SET)) {
/* Attempt to reopen a closed/aborted connection.
* Delete this connection and look up again. */
spin_unlock_bh(&ct->lock);
/* Only repeat if we can actually remove the timer.
* Destruction may already be in progress in process
* context and we must give it a chance to terminate.
*/
if (nf_ct_kill(ct))
return -NF_REPEAT;
return NF_DROP;
}
fallthrough;
case TCP_CONNTRACK_IGNORE:
/* Ignored packets:
*
* Our connection entry may be out of sync, so ignore
* packets which may signal the real connection between
* the client and the server.
*
* a) SYN in ORIGINAL
* b) SYN/ACK in REPLY
* c) ACK in reply direction after initial SYN in original.
*
* If the ignored packet is invalid, the receiver will send
* a RST we'll catch below.
*/
if (index == TCP_SYNACK_SET
&& ct->proto.tcp.last_index == TCP_SYN_SET
&& ct->proto.tcp.last_dir != dir
&& ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
/* b) This SYN/ACK acknowledges a SYN that we earlier
* ignored as invalid. This means that the client and
* the server are both in sync, while the firewall is
* not. We get in sync from the previously annotated
* values.
*/
old_state = TCP_CONNTRACK_SYN_SENT;
new_state = TCP_CONNTRACK_SYN_RECV;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
ct->proto.tcp.last_end;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
ct->proto.tcp.last_end;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
ct->proto.tcp.last_win == 0 ?
1 : ct->proto.tcp.last_win;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
ct->proto.tcp.last_wscale;
ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
ct->proto.tcp.last_flags;
nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
break;
}
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
ct->proto.tcp.last_seq = ntohl(th->seq);
ct->proto.tcp.last_end =
segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
ct->proto.tcp.last_win = ntohs(th->window);
/* a) This is a SYN in ORIGINAL. The client and the server
* may be in sync but we are not. In that case, we annotate
* the TCP options and let the packet go through. If it is a
* valid SYN packet, the server will reply with a SYN/ACK, and
* then we'll get in sync. Otherwise, the server potentially
* responds with a challenge ACK if implementing RFC5961.
*/
if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
struct ip_ct_tcp_state seen = {};
ct->proto.tcp.last_flags =
ct->proto.tcp.last_wscale = 0;
tcp_options(skb, dataoff, th, &seen);
if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
ct->proto.tcp.last_flags |=
IP_CT_TCP_FLAG_WINDOW_SCALE;
ct->proto.tcp.last_wscale = seen.td_scale;
}
if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
ct->proto.tcp.last_flags |=
IP_CT_TCP_FLAG_SACK_PERM;
}
/* Mark the potential for an RFC5961 challenge ACK;
* this poses a special problem for the LAST_ACK state,
* as the ACK is interpreted as ACKing the last FIN.
*/
if (old_state == TCP_CONNTRACK_LAST_ACK)
ct->proto.tcp.last_flags |=
IP_CT_EXP_CHALLENGE_ACK;
}
/* possible challenge ack reply to syn */
if (old_state == TCP_CONNTRACK_SYN_SENT &&
index == TCP_ACK_SET &&
dir == IP_CT_DIR_REPLY)
ct->proto.tcp.last_ack = ntohl(th->ack_seq);
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state,
"packet (index %d) in dir %d ignored, state %s",
index, dir,
tcp_conntrack_names[old_state]);
return NF_ACCEPT;
case TCP_CONNTRACK_MAX:
/* Special case for SYN proxy: when the SYN to the server or
* the SYN/ACK from the server is lost, the client may transmit
* a keep-alive packet while in SYN_SENT state. This needs to
* be associated with the original conntrack entry in order to
* generate a new SYN with the correct sequence number.
*/
if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
spin_unlock_bh(&ct->lock);
return NF_ACCEPT;
}
/* Invalid packet */
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state,
"packet (index %d) in dir %d invalid, state %s",
index, dir,
tcp_conntrack_names[old_state]);
return -NF_ACCEPT;
case TCP_CONNTRACK_TIME_WAIT:
/* RFC5961 compliance causes the stack to send a "challenge ACK",
* e.g. in response to spurious SYNs. Conntrack MUST NOT
* believe this ACK is acking the last FIN.
*/
if (old_state == TCP_CONNTRACK_LAST_ACK &&
index == TCP_ACK_SET &&
ct->proto.tcp.last_dir != dir &&
ct->proto.tcp.last_index == TCP_SYN_SET &&
(ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
/* Detected RFC5961 challenge ACK */
ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
return NF_ACCEPT; /* Don't change state */
}
break;
case TCP_CONNTRACK_SYN_SENT2:
/* tcp_conntracks table is not smart enough to handle
* simultaneous open.
*/
ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
break;
case TCP_CONNTRACK_SYN_RECV:
if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
new_state = TCP_CONNTRACK_ESTABLISHED;
break;
case TCP_CONNTRACK_CLOSE:
if (index != TCP_RST_SET)
break;
/* If we are closing, tuple might have been re-used already.
* last_index, last_ack, and all other ct fields used for
* sequence/window validation are outdated in that case.
*
* As the conntrack can already be expired by GC under pressure,
* just skip validation checks.
*/
if (tcp_can_early_drop(ct))
goto in_window;
/* td_maxack might be outdated if we let a SYN through earlier */
if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
ct->proto.tcp.last_index != TCP_SYN_SET) {
u32 seq = ntohl(th->seq);
/* If we are not in established state and SEQ=0 this is most
* likely an answer to a SYN we let go through above (last_index
* can be updated due to out-of-order ACKs).
*/
if (seq == 0 && !nf_conntrack_tcp_established(ct))
break;
if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
!tn->tcp_ignore_invalid_rst) {
/* Invalid RST */
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
return -NF_ACCEPT;
}
if (!nf_conntrack_tcp_established(ct) ||
seq == ct->proto.tcp.seen[!dir].td_maxack)
break;
/* Check if rst is part of train, such as
* foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
* foo:80 > bar:4379: R, 235946602:235946602(0) ack 42
*/
if (ct->proto.tcp.last_index == TCP_ACK_SET &&
ct->proto.tcp.last_dir == dir &&
seq == ct->proto.tcp.last_end)
break;
/* ... RST sequence number doesn't match exactly, keep
* established state to allow a possible challenge ACK.
*/
new_state = old_state;
}
if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
&& ct->proto.tcp.last_index == TCP_SYN_SET)
|| (!test_bit(IPS_ASSURED_BIT, &ct->status)
&& ct->proto.tcp.last_index == TCP_ACK_SET))
&& ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
/* RST sent to invalid SYN or ACK we had let through
* at a) and c) above:
*
* a) SYN was in window then
* c) we hold a half-open connection.
*
* Delete our connection entry.
* We skip window checking, because packet might ACK
* segments we ignored. */
goto in_window;
}
/* Reset in response to a challenge-ack we let through earlier */
if (old_state == TCP_CONNTRACK_SYN_SENT &&
ct->proto.tcp.last_index == TCP_ACK_SET &&
ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
ntohl(th->seq) == ct->proto.tcp.last_ack)
goto in_window;
break;
default:
/* Keep compilers happy. */
break;
}
res = tcp_in_window(ct, dir, index,
skb, dataoff, th, state);
switch (res) {
case NFCT_TCP_IGNORE:
spin_unlock_bh(&ct->lock);
return NF_ACCEPT;
case NFCT_TCP_INVALID:
nf_tcp_handle_invalid(ct, dir, index, skb, state);
spin_unlock_bh(&ct->lock);
return -NF_ACCEPT;
case NFCT_TCP_ACCEPT:
break;
}
in_window:
/* From now on we have got in-window packets */
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
ct->proto.tcp.state = new_state;
if (old_state != new_state
&& new_state == TCP_CONNTRACK_FIN_WAIT)
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
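/* Pick the timeout: the retransmission limit, a RST, unacknowledged
 * data and a zero window each override the plain per-state value.
 */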
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = tn->timeouts;
if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else if (unlikely(index == TCP_RST_SET))
timeout = timeouts[TCP_CONNTRACK_CLOSE];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
else if (ct->proto.tcp.last_win == 0 &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else
timeout = timeouts[new_state];
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
/* If the only reply is a RST, we can consider ourselves not to
have an established connection: this is a fairly common
problem case, so we can delete the conntrack
immediately. --RR */
if (th->rst) {
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
/* do not renew timeout on SYN retransmit.
*
* Else port reuse by client or NAT middlebox can keep
* entry alive indefinitely (including nat info).
*/
return NF_ACCEPT;
}
/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
* pickup with loose=1. Avoid large ESTABLISHED timeout.
*/
if (new_state == TCP_CONNTRACK_ESTABLISHED &&
timeout > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
} else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
&& (old_state == TCP_CONNTRACK_SYN_RECV
|| old_state == TCP_CONNTRACK_ESTABLISHED)
&& new_state == TCP_CONNTRACK_ESTABLISHED) {
/* Set ASSURED if we see a valid ack in ESTABLISHED
after SYN_RECV, or a valid answer for a picked-up
connection. */
set_bit(IPS_ASSURED_BIT, &ct->status);
nf_conntrack_event_cache(IPCT_ASSURED, ct);
}
nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
return NF_ACCEPT;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct, bool destroy)
{
struct nlattr *nest_parms;
struct nf_ct_tcp_flags tmp = {};
spin_lock_bh(&ct->lock);
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state))
goto nla_put_failure;
if (destroy)
goto skip_state;
if (nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
ct->proto.tcp.seen[0].td_scale) ||
nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
ct->proto.tcp.seen[1].td_scale))
goto nla_put_failure;
tmp.flags = ct->proto.tcp.seen[0].flags;
if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
sizeof(struct nf_ct_tcp_flags), &tmp))
goto nla_put_failure;
tmp.flags = ct->proto.tcp.seen[1].flags;
if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
sizeof(struct nf_ct_tcp_flags), &tmp))
goto nla_put_failure;
skip_state:
spin_unlock_bh(&ct->lock);
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
spin_unlock_bh(&ct->lock);
return -1;
}
static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
[CTA_PROTOINFO_TCP_STATE] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_WSCALE_REPLY] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL] = { .len = sizeof(struct nf_ct_tcp_flags) },
[CTA_PROTOINFO_TCP_FLAGS_REPLY] = { .len = sizeof(struct nf_ct_tcp_flags) },
};
#define TCP_NLATTR_SIZE ( \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
int err;
/* updates might not contain anything about the private
* protocol info; in that case skip the parsing */
if (!pattr)
return 0;
err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
tcp_nla_policy, NULL);
if (err < 0)
return err;
if (tb[CTA_PROTOINFO_TCP_STATE] &&
nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
return -EINVAL;
spin_lock_bh(&ct->lock);
if (tb[CTA_PROTOINFO_TCP_STATE])
ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
struct nf_ct_tcp_flags *attr =
nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
ct->proto.tcp.seen[0].flags &= ~attr->mask;
ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
}
if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
struct nf_ct_tcp_flags *attr =
nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
ct->proto.tcp.seen[1].flags &= ~attr->mask;
ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
}
if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
ct->proto.tcp.seen[0].td_scale =
nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
ct->proto.tcp.seen[1].td_scale =
nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
}
spin_unlock_bh(&ct->lock);
return 0;
}
static unsigned int tcp_nlattr_tuple_size(void)
{
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
unsigned int *timeouts = data;
int i;
if (!timeouts)
timeouts = tn->timeouts;
/* set default TCP timeouts. */
for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
timeouts[i] = tn->timeouts[i];
if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
timeouts[TCP_CONNTRACK_SYN_SENT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
timeouts[TCP_CONNTRACK_SYN_RECV] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
timeouts[TCP_CONNTRACK_ESTABLISHED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
timeouts[TCP_CONNTRACK_FIN_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
timeouts[TCP_CONNTRACK_LAST_ACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
timeouts[TCP_CONNTRACK_TIME_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
timeouts[TCP_CONNTRACK_CLOSE] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
timeouts[TCP_CONNTRACK_SYN_SENT2] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
timeouts[TCP_CONNTRACK_RETRANS] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_UNACK]) {
timeouts[TCP_CONNTRACK_UNACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
}
timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
return 0;
}
static int
tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
[CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_tcp_init_net(struct net *net)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
int i;
for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
tn->timeouts[i] = tcp_timeouts[i];
/* timeouts[0] is unused; make it the same as SYN_SENT so
* ->timeouts[0] contains the 'new' timeout, like udp or icmp.
*/
tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
/* If it is set to zero, we disable picking up already established
* connections.
*/
tn->tcp_loose = 1;
/* "Be conservative in what you do,
* be liberal in what you accept from others."
* If it's non-zero, we mark only out-of-window RST segments as INVALID.
*/
tn->tcp_be_liberal = 0;
/* If it's non-zero, we turn off the RST sequence number check */
tn->tcp_ignore_invalid_rst = 0;
/* Max number of retransmitted packets without receiving an (acceptable)
* ACK from the destination. If this number is reached, a shorter timer
* will be started.
*/
tn->tcp_max_retrans = 3;
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
tn->offload_timeout = 30 * HZ;
#endif
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
{
.l4proto = IPPROTO_TCP,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = tcp_print_conntrack,
#endif
.can_early_drop = tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nlattr_size = TCP_NLATTR_SIZE,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_TCP_MAX,
.obj_size = sizeof(unsigned int) *
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
| linux-master | net/netfilter/nf_conntrack_proto_tcp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
* Copyright (c) 2016 Pablo Neira Ayuso <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/gre.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
struct vlan_ethhdr *veth)
{
if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
return false;
veth->h_vlan_proto = skb->vlan_proto;
veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
veth->h_vlan_encapsulated_proto = skb->protocol;
return true;
}
/* add the vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
int mac_off = skb_mac_header(skb) - skb->data;
u8 *vlanh, *dst_u8 = (u8 *) d;
struct vlan_ethhdr veth;
u8 vlan_hlen = 0;
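/* When the outer protocol is already 802.1Q/802.1AD, a second tag may sit
 * in the header; account for it if the requested offset reaches into it.
 */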
if ((skb->protocol == htons(ETH_P_8021AD) ||
skb->protocol == htons(ETH_P_8021Q)) &&
offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
vlan_hlen += VLAN_HLEN;
vlanh = (u8 *) &veth;
if (offset < VLAN_ETH_HLEN + vlan_hlen) {
u8 ethlen = len;
if (vlan_hlen &&
skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
return false;
else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
return false;
if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
len -= ethlen;
if (len == 0)
return true;
dst_u8 += ethlen;
offset = ETH_HLEN + vlan_hlen;
} else {
offset -= VLAN_HLEN + vlan_hlen;
}
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
unsigned int thoff = nft_thoff(pkt);
if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
return -1;
switch (pkt->tprot) {
case IPPROTO_UDP:
pkt->inneroff = thoff + sizeof(struct udphdr);
break;
case IPPROTO_TCP: {
struct tcphdr *th, _tcph;
th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
if (!th)
return -1;
pkt->inneroff = thoff + __tcp_hdrlen(th);
}
break;
case IPPROTO_GRE: {
u32 offset = sizeof(struct gre_base_hdr);
struct gre_base_hdr *gre, _gre;
__be16 version;
gre = skb_header_pointer(pkt->skb, thoff, sizeof(_gre), &_gre);
if (!gre)
return -1;
version = gre->flags & GRE_VERSION;
switch (version) {
case GRE_VERSION_0:
if (gre->flags & GRE_ROUTING)
return -1;
if (gre->flags & GRE_CSUM) {
offset += sizeof_field(struct gre_full_hdr, csum) +
sizeof_field(struct gre_full_hdr, reserved1);
}
if (gre->flags & GRE_KEY)
offset += sizeof_field(struct gre_full_hdr, key);
if (gre->flags & GRE_SEQ)
offset += sizeof_field(struct gre_full_hdr, seq);
break;
default:
return -1;
}
pkt->inneroff = thoff + offset;
}
break;
case IPPROTO_IPIP:
pkt->inneroff = thoff;
break;
default:
return -1;
}
pkt->flags |= NFT_PKTINFO_INNER;
return 0;
}
int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
if (!(pkt->flags & NFT_PKTINFO_INNER) &&
__nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
return -1;
return pkt->inneroff;
}
void nft_payload_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
u32 *dest = ®s->data[priv->dreg];
int offset;
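/* Zero the tail of the last 32-bit register so a partial-length
 * copy leaves no stale bits behind.
 */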
if (priv->len % NFT_REG32_SIZE)
dest[priv->len / NFT_REG32_SIZE] = 0;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
goto err;
if (skb_vlan_tag_present(skb) &&
priv->offset >= offsetof(struct ethhdr, h_proto)) {
if (!nft_payload_copy_vlan(dest, skb,
priv->offset, priv->len))
goto err;
return;
}
offset = skb_mac_header(skb) - skb->data;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
goto err;
offset = nft_thoff(pkt);
break;
case NFT_PAYLOAD_INNER_HEADER:
offset = nft_payload_inner_offset(pkt);
if (offset < 0)
goto err;
break;
default:
WARN_ON_ONCE(1);
goto err;
}
offset += priv->offset;
if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
[NFTA_PAYLOAD_SREG] = { .type = NLA_U32 },
[NFTA_PAYLOAD_DREG] = { .type = NLA_U32 },
[NFTA_PAYLOAD_BASE] = { .type = NLA_U32 },
[NFTA_PAYLOAD_OFFSET] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_PAYLOAD_LEN] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_OFFSET] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 },
};
static int nft_payload_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_payload *priv = nft_expr_priv(expr);
priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
&priv->dreg, NULL, NFT_DATA_VALUE,
priv->len);
}
static int nft_payload_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_payload *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static bool nft_payload_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct nft_payload *payload;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
payload = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->base != payload->base ||
priv->offset != payload->offset ||
priv->len != payload->len) {
nft_reg_track_update(track, expr, priv->dreg, priv->len);
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return nft_expr_reduce_bitwise(track, expr);
}
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
u32 priv_len, u32 field_len)
{
unsigned int remainder, delta, k;
struct nft_data mask = {};
__be32 remainder_mask;
if (priv_len == field_len) {
memset(®->mask, 0xff, priv_len);
return true;
} else if (priv_len > field_len) {
return false;
}
memset(&mask, 0xff, field_len);
remainder = priv_len % sizeof(u32);
if (remainder) {
k = priv_len / sizeof(u32);
delta = field_len - priv_len;
remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
mask.data[k] = (__force u32)remainder_mask;
}
memcpy(®->mask, &mask, field_len);
return true;
}
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_payload *priv)
{
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
switch (priv->offset) {
case offsetof(struct ethhdr, h_source):
if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
src, ETH_ALEN, reg);
break;
case offsetof(struct ethhdr, h_dest):
if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
dst, ETH_ALEN, reg);
break;
case offsetof(struct ethhdr, h_proto):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
n_proto, sizeof(__be16), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
break;
case offsetof(struct vlan_ethhdr, h_vlan_TCI):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
vlan_tci, sizeof(__be16), reg,
NFT_OFFLOAD_F_NETWORK2HOST);
break;
case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
vlan_tpid, sizeof(__be16), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
break;
case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
vlan_tci, sizeof(__be16), reg,
NFT_OFFLOAD_F_NETWORK2HOST);
break;
case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
sizeof(struct vlan_hdr):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
vlan_tpid, sizeof(__be16), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_payload *priv)
{
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
switch (priv->offset) {
case offsetof(struct iphdr, saddr):
if (!nft_payload_offload_mask(reg, priv->len,
sizeof(struct in_addr)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
sizeof(struct in_addr), reg);
nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
break;
case offsetof(struct iphdr, daddr):
if (!nft_payload_offload_mask(reg, priv->len,
sizeof(struct in_addr)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
sizeof(struct in_addr), reg);
nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
break;
case offsetof(struct iphdr, protocol):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
sizeof(__u8), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_payload *priv)
{
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
switch (priv->offset) {
case offsetof(struct ipv6hdr, saddr):
if (!nft_payload_offload_mask(reg, priv->len,
sizeof(struct in6_addr)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
sizeof(struct in6_addr), reg);
nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
break;
case offsetof(struct ipv6hdr, daddr):
if (!nft_payload_offload_mask(reg, priv->len,
sizeof(struct in6_addr)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
sizeof(struct in6_addr), reg);
nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
break;
case offsetof(struct ipv6hdr, nexthdr):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
sizeof(__u8), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_payload *priv)
{
int err;
switch (ctx->dep.l3num) {
case htons(ETH_P_IP):
err = nft_payload_offload_ip(ctx, flow, priv);
break;
case htons(ETH_P_IPV6):
err = nft_payload_offload_ip6(ctx, flow, priv);
break;
default:
return -EOPNOTSUPP;
}
return err;
}
static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_payload *priv)
{
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
switch (priv->offset) {
case offsetof(struct tcphdr, source):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
sizeof(__be16), reg);
break;
case offsetof(struct tcphdr, dest):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
sizeof(__be16), reg);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_payload *priv)
{
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
switch (priv->offset) {
case offsetof(struct udphdr, source):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
sizeof(__be16), reg);
break;
case offsetof(struct udphdr, dest):
if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
return -EOPNOTSUPP;
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
sizeof(__be16), reg);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_payload *priv)
{
int err;
switch (ctx->dep.protonum) {
case IPPROTO_TCP:
err = nft_payload_offload_tcp(ctx, flow, priv);
break;
case IPPROTO_UDP:
err = nft_payload_offload_udp(ctx, flow, priv);
break;
default:
return -EOPNOTSUPP;
}
return err;
}
static int nft_payload_offload(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr)
{
const struct nft_payload *priv = nft_expr_priv(expr);
int err;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
err = nft_payload_offload_ll(ctx, flow, priv);
break;
case NFT_PAYLOAD_NETWORK_HEADER:
err = nft_payload_offload_nh(ctx, flow, priv);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
err = nft_payload_offload_th(ctx, flow, priv);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static const struct nft_expr_ops nft_payload_ops = {
.type = &nft_payload_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
.eval = nft_payload_eval,
.init = nft_payload_init,
.dump = nft_payload_dump,
.reduce = nft_payload_reduce,
.offload = nft_payload_offload,
};
const struct nft_expr_ops nft_payload_fast_ops = {
.type = &nft_payload_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
.eval = nft_payload_eval,
.init = nft_payload_init,
.dump = nft_payload_dump,
.reduce = nft_payload_reduce,
.offload = nft_payload_offload,
};
void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *tun_ctx)
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
u32 *dest = ®s->data[priv->dreg];
int offset;
if (priv->len % NFT_REG32_SIZE)
dest[priv->len / NFT_REG32_SIZE] = 0;
switch (priv->base) {
case NFT_PAYLOAD_TUN_HEADER:
if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TUN))
goto err;
offset = tun_ctx->inner_tunoff;
break;
case NFT_PAYLOAD_LL_HEADER:
if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_LL))
goto err;
offset = tun_ctx->inner_lloff;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_NH))
goto err;
offset = tun_ctx->inner_nhoff;
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
goto err;
offset = tun_ctx->inner_thoff;
break;
default:
WARN_ON_ONCE(1);
goto err;
}
offset += priv->offset;
if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
regs->verdict.code = NFT_BREAK;
}
static int nft_payload_inner_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_payload *priv = nft_expr_priv(expr);
u32 base;
base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
switch (base) {
case NFT_PAYLOAD_TUN_HEADER:
case NFT_PAYLOAD_LL_HEADER:
case NFT_PAYLOAD_NETWORK_HEADER:
case NFT_PAYLOAD_TRANSPORT_HEADER:
break;
default:
return -EOPNOTSUPP;
}
priv->base = base;
priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
&priv->dreg, NULL, NFT_DATA_VALUE,
priv->len);
}
static const struct nft_expr_ops nft_payload_inner_ops = {
.type = &nft_payload_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
.init = nft_payload_inner_init,
.dump = nft_payload_dump,
/* direct call to nft_payload_inner_eval(). */
};
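/* Incremental checksum fixup in the style of RFC 1624: subtract the
* partial sum of the old bytes (fsum), add the partial sum of the new
* bytes (tsum). A result of zero is stored as CSUM_MANGLED_0 (0xffff),
* because an all-zero checksum field means "no checksum" for UDP.
*/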
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
if (*sum == 0)
*sum = CSUM_MANGLED_0;
}
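/* A zero UDP checksum on the wire means "checksum disabled" (legal over
* IPv4), so return false for such packets and leave them alone rather
* than recalculating a checksum the peer will never verify.
*/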
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
struct udphdr *uh, _uh;
uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
if (!uh)
return false;
return (__force bool)uh->check;
}
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
struct sk_buff *skb,
unsigned int *l4csum_offset)
{
if (pkt->fragoff)
return -1;
switch (pkt->tprot) {
case IPPROTO_TCP:
*l4csum_offset = offsetof(struct tcphdr, check);
break;
case IPPROTO_UDP:
if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
return -1;
fallthrough;
case IPPROTO_UDPLITE:
*l4csum_offset = offsetof(struct udphdr, check);
break;
case IPPROTO_ICMPV6:
*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
break;
default:
return -1;
}
*l4csum_offset += nft_thoff(pkt);
return 0;
}
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
struct sctphdr *sh;
if (skb_ensure_writable(skb, offset + sizeof(*sh)))
return -1;
sh = (struct sctphdr *)(skb->data + offset);
sh->checksum = sctp_compute_cksum(skb, offset);
skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
}
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
struct sk_buff *skb,
__wsum fsum, __wsum tsum)
{
int l4csum_offset;
__sum16 sum;
/* If we cannot determine the layer 4 checksum offset, or this packet
* doesn't require layer 4 checksum recalculation, skip it.
*/
if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
return 0;
if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
return -1;
/* Checksum mangling for an arbitrary number of bytes, based on the
* inet_proto_csum_replace*() functions.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL) {
nft_csum_replace(&sum, fsum, tsum);
if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
tsum);
}
} else {
sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
tsum));
}
if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
return -1;
return 0;
}
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
__wsum fsum, __wsum tsum, int csum_offset)
{
__sum16 sum;
if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
return -1;
nft_csum_replace(&sum, fsum, tsum);
if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
return -1;
return 0;
}
struct nft_payload_set {
enum nft_payload_bases base:8;
u8 offset;
u8 len;
u8 sreg;
u8 csum_type;
u8 csum_offset;
u8 csum_flags;
};
static void nft_payload_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload_set *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
const u32 *src = ®s->data[priv->sreg];
int offset, csum_offset;
__wsum fsum, tsum;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
goto err;
offset = skb_mac_header(skb) - skb->data;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
goto err;
offset = nft_thoff(pkt);
break;
case NFT_PAYLOAD_INNER_HEADER:
offset = nft_payload_inner_offset(pkt);
if (offset < 0)
goto err;
break;
default:
WARN_ON_ONCE(1);
goto err;
}
csum_offset = offset + priv->csum_offset;
offset += priv->offset;
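/* Fix checksums in software unless the device will finalize them
* (CHECKSUM_PARTIAL) and the write targets the transport or inner
* header, in which case the later hardware checksum already covers the
* mangled bytes.
*/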
if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
priv->base != NFT_PAYLOAD_INNER_HEADER) ||
skb->ip_summed != CHECKSUM_PARTIAL)) {
fsum = skb_checksum(skb, offset, priv->len, 0);
tsum = csum_partial(src, priv->len, 0);
if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
goto err;
if (priv->csum_flags &&
nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
goto err;
}
if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
skb_store_bits(skb, offset, src, priv->len) < 0)
goto err;
if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
pkt->tprot == IPPROTO_SCTP &&
skb->ip_summed != CHECKSUM_PARTIAL) {
if (pkt->fragoff == 0 &&
nft_payload_csum_sctp(skb, nft_thoff(pkt)))
goto err;
}
return;
err:
regs->verdict.code = NFT_BREAK;
}
static int nft_payload_set_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_payload_set *priv = nft_expr_priv(expr);
u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
int err;
priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
if (tb[NFTA_PAYLOAD_CSUM_TYPE])
csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
&csum_offset);
if (err < 0)
return err;
priv->csum_offset = csum_offset;
}
if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
u32 flags;
flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
return -EINVAL;
priv->csum_flags = flags;
}
switch (csum_type) {
case NFT_PAYLOAD_CSUM_NONE:
case NFT_PAYLOAD_CSUM_INET:
break;
case NFT_PAYLOAD_CSUM_SCTP:
if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
return -EINVAL;
if (priv->csum_offset != offsetof(struct sctphdr, checksum))
return -EINVAL;
break;
default:
return -EOPNOTSUPP;
}
priv->csum_type = csum_type;
return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
priv->len);
}
static int nft_payload_set_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_payload_set *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
htonl(priv->csum_offset)) ||
nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static bool nft_payload_set_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
int i;
for (i = 0; i < NFT_REG32_NUM; i++) {
if (!track->regs[i].selector)
continue;
if (track->regs[i].selector->ops != &nft_payload_ops &&
track->regs[i].selector->ops != &nft_payload_fast_ops)
continue;
__nft_reg_track_cancel(track, i);
}
return false;
}
static const struct nft_expr_ops nft_payload_set_ops = {
.type = &nft_payload_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
.eval = nft_payload_set_eval,
.init = nft_payload_set_init,
.dump = nft_payload_set_dump,
.reduce = nft_payload_set_reduce,
};
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
enum nft_payload_bases base;
unsigned int offset, len;
int err;
if (tb[NFTA_PAYLOAD_BASE] == NULL ||
tb[NFTA_PAYLOAD_OFFSET] == NULL ||
tb[NFTA_PAYLOAD_LEN] == NULL)
return ERR_PTR(-EINVAL);
base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
switch (base) {
case NFT_PAYLOAD_LL_HEADER:
case NFT_PAYLOAD_NETWORK_HEADER:
case NFT_PAYLOAD_TRANSPORT_HEADER:
case NFT_PAYLOAD_INNER_HEADER:
break;
default:
return ERR_PTR(-EOPNOTSUPP);
}
if (tb[NFTA_PAYLOAD_SREG] != NULL) {
if (tb[NFTA_PAYLOAD_DREG] != NULL)
return ERR_PTR(-EINVAL);
return &nft_payload_set_ops;
}
if (tb[NFTA_PAYLOAD_DREG] == NULL)
return ERR_PTR(-EINVAL);
err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
if (err < 0)
return ERR_PTR(err);
err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
if (err < 0)
return ERR_PTR(err);
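/* Loads of at most four bytes that are power-of-two sized and naturally
* aligned qualify for the fast ops; link-layer and inner-header reads
* always take the generic path.
*/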
if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
return &nft_payload_fast_ops;
else
return &nft_payload_ops;
}
struct nft_expr_type nft_payload_type __read_mostly = {
.name = "payload",
.select_ops = nft_payload_select_ops,
.inner_ops = &nft_payload_inner_ops,
.policy = nft_payload_policy,
.maxattr = NFTA_PAYLOAD_MAX,
.owner = THIS_MODULE,
};
| linux-master | net/netfilter/nft_payload.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>
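/* A single hook function serves every family: build the pktinfo for the
* protocol actually seen at the hook, then run the chain.
*/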
static unsigned int nft_nat_do_chain(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
switch (state->pf) {
#ifdef CONFIG_NF_TABLES_IPV4
case NFPROTO_IPV4:
nft_set_pktinfo_ipv4(&pkt);
break;
#endif
#ifdef CONFIG_NF_TABLES_IPV6
case NFPROTO_IPV6:
nft_set_pktinfo_ipv6(&pkt);
break;
#endif
default:
break;
}
return nft_do_chain(&pkt, priv);
}
#ifdef CONFIG_NF_TABLES_IPV4
static const struct nft_chain_type nft_chain_nat_ipv4 = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
.family = NFPROTO_IPV4,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
.hooks = {
[NF_INET_PRE_ROUTING] = nft_nat_do_chain,
[NF_INET_POST_ROUTING] = nft_nat_do_chain,
[NF_INET_LOCAL_OUT] = nft_nat_do_chain,
[NF_INET_LOCAL_IN] = nft_nat_do_chain,
},
.ops_register = nf_nat_ipv4_register_fn,
.ops_unregister = nf_nat_ipv4_unregister_fn,
};
#endif
#ifdef CONFIG_NF_TABLES_IPV6
static const struct nft_chain_type nft_chain_nat_ipv6 = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
.family = NFPROTO_IPV6,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
.hooks = {
[NF_INET_PRE_ROUTING] = nft_nat_do_chain,
[NF_INET_POST_ROUTING] = nft_nat_do_chain,
[NF_INET_LOCAL_OUT] = nft_nat_do_chain,
[NF_INET_LOCAL_IN] = nft_nat_do_chain,
},
.ops_register = nf_nat_ipv6_register_fn,
.ops_unregister = nf_nat_ipv6_unregister_fn,
};
#endif
#ifdef CONFIG_NF_TABLES_INET
static int nft_nat_inet_reg(struct net *net, const struct nf_hook_ops *ops)
{
return nf_nat_inet_register_fn(net, ops);
}
static void nft_nat_inet_unreg(struct net *net, const struct nf_hook_ops *ops)
{
nf_nat_inet_unregister_fn(net, ops);
}
static const struct nft_chain_type nft_chain_nat_inet = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
.family = NFPROTO_INET,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING),
.hooks = {
[NF_INET_PRE_ROUTING] = nft_nat_do_chain,
[NF_INET_LOCAL_IN] = nft_nat_do_chain,
[NF_INET_LOCAL_OUT] = nft_nat_do_chain,
[NF_INET_POST_ROUTING] = nft_nat_do_chain,
},
.ops_register = nft_nat_inet_reg,
.ops_unregister = nft_nat_inet_unreg,
};
#endif
static int __init nft_chain_nat_init(void)
{
#ifdef CONFIG_NF_TABLES_IPV6
nft_register_chain_type(&nft_chain_nat_ipv6);
#endif
#ifdef CONFIG_NF_TABLES_IPV4
nft_register_chain_type(&nft_chain_nat_ipv4);
#endif
#ifdef CONFIG_NF_TABLES_INET
nft_register_chain_type(&nft_chain_nat_inet);
#endif
return 0;
}
static void __exit nft_chain_nat_exit(void)
{
#ifdef CONFIG_NF_TABLES_IPV4
nft_unregister_chain_type(&nft_chain_nat_ipv4);
#endif
#ifdef CONFIG_NF_TABLES_IPV6
nft_unregister_chain_type(&nft_chain_nat_ipv6);
#endif
#ifdef CONFIG_NF_TABLES_INET
nft_unregister_chain_type(&nft_chain_nat_inet);
#endif
}
module_init(nft_chain_nat_init);
module_exit(nft_chain_nat_exit);
MODULE_LICENSE("GPL");
#ifdef CONFIG_NF_TABLES_IPV4
MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
#endif
#ifdef CONFIG_NF_TABLES_IPV6
MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
#endif
#ifdef CONFIG_NF_TABLES_INET
MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */
#endif
| linux-master | net/netfilter/nft_chain_nat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
struct nft_limit {
spinlock_t lock;
u64 last;
u64 tokens;
};
struct nft_limit_priv {
struct nft_limit *limit;
u64 tokens_max;
u64 rate;
u64 nsecs;
u32 burst;
bool invert;
};
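/* Token bucket kept in nanosecond units: tokens accrue at one per
* elapsed nanosecond, capped at tokens_max, and each packet (or byte
* batch) costs its nanosecond "price". If the cost cannot be paid, the
* expression reports a limit breach (subject to the invert flag).
*/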
static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
{
u64 now, tokens;
s64 delta;
spin_lock_bh(&priv->limit->lock);
now = ktime_get_ns();
tokens = priv->limit->tokens + now - priv->limit->last;
if (tokens > priv->tokens_max)
tokens = priv->tokens_max;
priv->limit->last = now;
delta = tokens - cost;
if (delta >= 0) {
priv->limit->tokens = delta;
spin_unlock_bh(&priv->limit->lock);
return priv->invert;
}
priv->limit->tokens = tokens;
spin_unlock_bh(&priv->limit->lock);
return !priv->invert;
}
/* Use same default as in iptables. */
#define NFT_LIMIT_PKT_BURST_DEFAULT 5
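/* A packet limit configured without an explicit burst therefore behaves
* like "limit rate N/second burst 5 packets" (illustrative nft syntax).
*/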
static int nft_limit_init(struct nft_limit_priv *priv,
const struct nlattr * const tb[], bool pkts)
{
u64 unit, tokens;
if (tb[NFTA_LIMIT_RATE] == NULL ||
tb[NFTA_LIMIT_UNIT] == NULL)
return -EINVAL;
priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
priv->nsecs = unit * NSEC_PER_SEC;
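/* unit * NSEC_PER_SEC wrapped around if the product is now smaller than
* unit, so reject such a configuration as overflow.
*/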
if (priv->rate == 0 || priv->nsecs < unit)
return -EOVERFLOW;
if (tb[NFTA_LIMIT_BURST])
priv->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
if (pkts && priv->burst == 0)
priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
if (priv->rate + priv->burst < priv->rate)
return -EOVERFLOW;
if (pkts) {
tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
} else {
/* The token bucket size limits the number of tokens that can be
* accumulated. tokens_max specifies the bucket size.
* tokens_max = unit * (rate + burst) / rate.
*/
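/* Illustrative figures: rate = 1000 bytes/s, burst = 500 and unit = 1 s
* give nsecs = 1e9 and tokens_max = 1e9 * 1500 / 1000 = 1.5e9, i.e. the
* bucket absorbs 1.5 seconds' worth of traffic at the base rate.
*/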
tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
priv->rate);
}
priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL_ACCOUNT);
if (!priv->limit)
return -ENOMEM;
priv->limit->tokens = tokens;
priv->tokens_max = priv->limit->tokens;
if (tb[NFTA_LIMIT_FLAGS]) {
u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
if (flags & NFT_LIMIT_F_INV)
priv->invert = true;
}
priv->limit->last = ktime_get_ns();
spin_lock_init(&priv->limit->lock);
return 0;
}
static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit_priv *priv,
enum nft_limit_type type)
{
u32 flags = priv->invert ? NFT_LIMIT_F_INV : 0;
u64 secs = div_u64(priv->nsecs, NSEC_PER_SEC);
if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate),
NFTA_LIMIT_PAD) ||
nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
NFTA_LIMIT_PAD) ||
nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(priv->burst)) ||
nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)) ||
nla_put_be32(skb, NFTA_LIMIT_FLAGS, htonl(flags)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static void nft_limit_destroy(const struct nft_ctx *ctx,
const struct nft_limit_priv *priv)
{
kfree(priv->limit);
}
static int nft_limit_clone(struct nft_limit_priv *priv_dst,
const struct nft_limit_priv *priv_src)
{
priv_dst->tokens_max = priv_src->tokens_max;
priv_dst->rate = priv_src->rate;
priv_dst->nsecs = priv_src->nsecs;
priv_dst->burst = priv_src->burst;
priv_dst->invert = priv_src->invert;
priv_dst->limit = kmalloc(sizeof(*priv_dst->limit), GFP_ATOMIC);
if (!priv_dst->limit)
return -ENOMEM;
spin_lock_init(&priv_dst->limit->lock);
priv_dst->limit->tokens = priv_src->tokens_max;
priv_dst->limit->last = ktime_get_ns();
return 0;
}
struct nft_limit_priv_pkts {
struct nft_limit_priv limit;
u64 cost;
};
static void nft_limit_pkts_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
if (nft_limit_eval(&priv->limit, priv->cost))
regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
[NFTA_LIMIT_RATE] = { .type = NLA_U64 },
[NFTA_LIMIT_UNIT] = { .type = NLA_U64 },
[NFTA_LIMIT_BURST] = { .type = NLA_U32 },
[NFTA_LIMIT_TYPE] = { .type = NLA_U32 },
[NFTA_LIMIT_FLAGS] = { .type = NLA_U32 },
};
static int nft_limit_pkts_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
int err;
err = nft_limit_init(&priv->limit, tb, true);
if (err < 0)
return err;
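/* Price of a single packet in nanosecond tokens: e.g. a 10/second rate
* over a one-second unit costs 1e8 ns per packet (illustrative figures).
*/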
priv->cost = div64_u64(priv->limit.nsecs, priv->limit.rate);
return 0;
}
static int nft_limit_pkts_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
}
static void nft_limit_pkts_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
nft_limit_destroy(ctx, &priv->limit);
}
static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src)
{
struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
priv_dst->cost = priv_src->cost;
return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
}
static struct nft_expr_type nft_limit_type;
static const struct nft_expr_ops nft_limit_pkts_ops = {
.type = &nft_limit_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
.eval = nft_limit_pkts_eval,
.init = nft_limit_pkts_init,
.destroy = nft_limit_pkts_destroy,
.clone = nft_limit_pkts_clone,
.dump = nft_limit_pkts_dump,
.reduce = NFT_REDUCE_READONLY,
};
static void nft_limit_bytes_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_limit_priv *priv = nft_expr_priv(expr);
u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
if (nft_limit_eval(priv, cost))
regs->verdict.code = NFT_BREAK;
}
static int nft_limit_bytes_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_limit_priv *priv = nft_expr_priv(expr);
return nft_limit_init(priv, tb, false);
}
static int nft_limit_bytes_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_limit_priv *priv = nft_expr_priv(expr);
return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
}
static void nft_limit_bytes_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_limit_priv *priv = nft_expr_priv(expr);
nft_limit_destroy(ctx, priv);
}
static int nft_limit_bytes_clone(struct nft_expr *dst, const struct nft_expr *src)
{
struct nft_limit_priv *priv_dst = nft_expr_priv(dst);
struct nft_limit_priv *priv_src = nft_expr_priv(src);
return nft_limit_clone(priv_dst, priv_src);
}
static const struct nft_expr_ops nft_limit_bytes_ops = {
.type = &nft_limit_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv)),
.eval = nft_limit_bytes_eval,
.init = nft_limit_bytes_init,
.dump = nft_limit_bytes_dump,
.clone = nft_limit_bytes_clone,
.destroy = nft_limit_bytes_destroy,
.reduce = NFT_REDUCE_READONLY,
};
static const struct nft_expr_ops *
nft_limit_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_LIMIT_TYPE] == NULL)
return &nft_limit_pkts_ops;
switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
case NFT_LIMIT_PKTS:
return &nft_limit_pkts_ops;
case NFT_LIMIT_PKT_BYTES:
return &nft_limit_bytes_ops;
}
return ERR_PTR(-EOPNOTSUPP);
}
static struct nft_expr_type nft_limit_type __read_mostly = {
.name = "limit",
.select_ops = nft_limit_select_ops,
.policy = nft_limit_policy,
.maxattr = NFTA_LIMIT_MAX,
.flags = NFT_EXPR_STATEFUL,
.owner = THIS_MODULE,
};
static void nft_limit_obj_pkts_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
if (nft_limit_eval(&priv->limit, priv->cost))
regs->verdict.code = NFT_BREAK;
}
static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
int err;
err = nft_limit_init(&priv->limit, tb, true);
if (err < 0)
return err;
priv->cost = div64_u64(priv->limit.nsecs, priv->limit.rate);
return 0;
}
static int nft_limit_obj_pkts_dump(struct sk_buff *skb,
struct nft_object *obj,
bool reset)
{
const struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
}
static void nft_limit_obj_pkts_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
nft_limit_destroy(ctx, &priv->limit);
}
static struct nft_object_type nft_limit_obj_type;
static const struct nft_object_ops nft_limit_obj_pkts_ops = {
.type = &nft_limit_obj_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
.init = nft_limit_obj_pkts_init,
.destroy = nft_limit_obj_pkts_destroy,
.eval = nft_limit_obj_pkts_eval,
.dump = nft_limit_obj_pkts_dump,
};
static void nft_limit_obj_bytes_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_limit_priv *priv = nft_obj_data(obj);
u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
if (nft_limit_eval(priv, cost))
regs->verdict.code = NFT_BREAK;
}
static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_limit_priv *priv = nft_obj_data(obj);
return nft_limit_init(priv, tb, false);
}
static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
struct nft_object *obj,
bool reset)
{
const struct nft_limit_priv *priv = nft_obj_data(obj);
return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
}
static void nft_limit_obj_bytes_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_limit_priv *priv = nft_obj_data(obj);
nft_limit_destroy(ctx, priv);
}
static struct nft_object_type nft_limit_obj_type;
static const struct nft_object_ops nft_limit_obj_bytes_ops = {
.type = &nft_limit_obj_type,
.size = sizeof(struct nft_limit_priv),
.init = nft_limit_obj_bytes_init,
.destroy = nft_limit_obj_bytes_destroy,
.eval = nft_limit_obj_bytes_eval,
.dump = nft_limit_obj_bytes_dump,
};
static const struct nft_object_ops *
nft_limit_obj_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (!tb[NFTA_LIMIT_TYPE])
return &nft_limit_obj_pkts_ops;
switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
case NFT_LIMIT_PKTS:
return &nft_limit_obj_pkts_ops;
case NFT_LIMIT_PKT_BYTES:
return &nft_limit_obj_bytes_ops;
}
return ERR_PTR(-EOPNOTSUPP);
}
static struct nft_object_type nft_limit_obj_type __read_mostly = {
.select_ops = nft_limit_obj_select_ops,
.type = NFT_OBJECT_LIMIT,
.maxattr = NFTA_LIMIT_MAX,
.policy = nft_limit_policy,
.owner = THIS_MODULE,
};
static int __init nft_limit_module_init(void)
{
int err;
err = nft_register_obj(&nft_limit_obj_type);
if (err < 0)
return err;
err = nft_register_expr(&nft_limit_type);
if (err < 0)
goto err1;
return 0;
err1:
nft_unregister_obj(&nft_limit_obj_type);
return err;
}
static void __exit nft_limit_module_exit(void)
{
nft_unregister_expr(&nft_limit_type);
nft_unregister_obj(&nft_limit_obj_type);
}
module_init(nft_limit_module_init);
module_exit(nft_limit_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS_NFT_EXPR("limit");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_LIMIT);
MODULE_DESCRIPTION("nftables limit expression support");
| linux-master | net/netfilter/nft_limit.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Accounting handling for netfilter. */
/*
* (C) 2008 Krzysztof Piotr Oledzki <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
static bool nf_ct_acct __read_mostly;
module_param_named(acct, nf_ct_acct, bool, 0644);
MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
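/* Each new network namespace inherits the module parameter as the
* initial value of its net.netfilter.nf_conntrack_acct sysctl.
*/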
void nf_conntrack_acct_pernet_init(struct net *net)
{
net->ct.sysctl_acct = nf_ct_acct;
}
| linux-master | net/netfilter/nf_conntrack_acct.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Laura Garcia <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <linux/jhash.h>
struct nft_jhash {
u8 sreg;
u8 dreg;
u8 len;
bool autogen_seed:1;
u32 modulus;
u32 seed;
u32 offset;
};
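/* reciprocal_scale() maps the 32-bit Jenkins hash uniformly onto
* [0, modulus) without a division; adding offset places the result in
* [offset, offset + modulus - 1].
*/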
static void nft_jhash_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_jhash *priv = nft_expr_priv(expr);
const void *data = ®s->data[priv->sreg];
u32 h;
h = reciprocal_scale(jhash(data, priv->len, priv->seed),
priv->modulus);
regs->data[priv->dreg] = h + priv->offset;
}
struct nft_symhash {
u8 dreg;
u32 modulus;
u32 offset;
};
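/* Symmetric flow hash: __skb_get_hash_symmetric() yields the same value
* for both directions of a flow, so forward and reply traffic land in
* the same bucket, which is useful for load balancing.
*/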
static void nft_symhash_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_symhash *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
u32 h;
h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus);
regs->data[priv->dreg] = h + priv->offset;
}
static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_SREG] = { .type = NLA_U32 },
[NFTA_HASH_DREG] = { .type = NLA_U32 },
[NFTA_HASH_LEN] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_HASH_MODULUS] = { .type = NLA_U32 },
[NFTA_HASH_SEED] = { .type = NLA_U32 },
[NFTA_HASH_OFFSET] = { .type = NLA_U32 },
[NFTA_HASH_TYPE] = { .type = NLA_U32 },
};
static int nft_jhash_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_jhash *priv = nft_expr_priv(expr);
u32 len;
int err;
if (!tb[NFTA_HASH_SREG] ||
!tb[NFTA_HASH_DREG] ||
!tb[NFTA_HASH_LEN] ||
!tb[NFTA_HASH_MODULUS])
return -EINVAL;
if (tb[NFTA_HASH_OFFSET])
priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
if (err < 0)
return err;
if (len == 0)
return -ERANGE;
priv->len = len;
err = nft_parse_register_load(tb[NFTA_HASH_SREG], &priv->sreg, len);
if (err < 0)
return err;
priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
if (priv->modulus < 1)
return -ERANGE;
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
if (tb[NFTA_HASH_SEED]) {
priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
} else {
priv->autogen_seed = true;
get_random_bytes(&priv->seed, sizeof(priv->seed));
}
return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, sizeof(u32));
}
static int nft_symhash_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_symhash *priv = nft_expr_priv(expr);
if (!tb[NFTA_HASH_DREG] ||
!tb[NFTA_HASH_MODULUS])
return -EINVAL;
if (tb[NFTA_HASH_OFFSET])
priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
if (priv->modulus < 1)
return -ERANGE;
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG],
&priv->dreg, NULL, NFT_DATA_VALUE,
sizeof(u32));
}
static int nft_jhash_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_jhash *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg))
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HASH_LEN, htonl(priv->len)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
goto nla_put_failure;
if (!priv->autogen_seed &&
nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
goto nla_put_failure;
if (priv->offset != 0)
if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_JENKINS)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static bool nft_jhash_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
const struct nft_jhash *priv = nft_expr_priv(expr);
nft_reg_track_cancel(track, priv->dreg, sizeof(u32));
return false;
}
static int nft_symhash_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
const struct nft_symhash *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
goto nla_put_failure;
if (priv->offset != 0)
if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_SYM)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static bool nft_symhash_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
struct nft_symhash *priv = nft_expr_priv(expr);
struct nft_symhash *symhash;
if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
nft_reg_track_update(track, expr, priv->dreg, sizeof(u32));
return false;
}
symhash = nft_expr_priv(track->regs[priv->dreg].selector);
if (priv->offset != symhash->offset ||
priv->modulus != symhash->modulus) {
nft_reg_track_update(track, expr, priv->dreg, sizeof(u32));
return false;
}
if (!track->regs[priv->dreg].bitwise)
return true;
return false;
}
static struct nft_expr_type nft_hash_type;
static const struct nft_expr_ops nft_jhash_ops = {
.type = &nft_hash_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_jhash)),
.eval = nft_jhash_eval,
.init = nft_jhash_init,
.dump = nft_jhash_dump,
.reduce = nft_jhash_reduce,
};
static const struct nft_expr_ops nft_symhash_ops = {
.type = &nft_hash_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)),
.eval = nft_symhash_eval,
.init = nft_symhash_init,
.dump = nft_symhash_dump,
.reduce = nft_symhash_reduce,
};
static const struct nft_expr_ops *
nft_hash_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
u32 type;
if (!tb[NFTA_HASH_TYPE])
return &nft_jhash_ops;
type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE]));
switch (type) {
case NFT_HASH_SYM:
return &nft_symhash_ops;
case NFT_HASH_JENKINS:
return &nft_jhash_ops;
default:
break;
}
return ERR_PTR(-EOPNOTSUPP);
}
static struct nft_expr_type nft_hash_type __read_mostly = {
.name = "hash",
.select_ops = nft_hash_select_ops,
.policy = nft_hash_policy,
.maxattr = NFTA_HASH_MAX,
.owner = THIS_MODULE,
};
static int __init nft_hash_module_init(void)
{
return nft_register_expr(&nft_hash_type);
}
static void __exit nft_hash_module_exit(void)
{
nft_unregister_expr(&nft_hash_type);
}
module_init(nft_hash_module_init);
module_exit(nft_hash_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laura Garcia <[email protected]>");
MODULE_ALIAS_NFT_EXPR("hash");
MODULE_DESCRIPTION("Netfilter nftables hash module");
| linux-master | net/netfilter/nft_hash.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IP tables module for matching the routing realm
*
* (C) 2003 by Sampsa Ranta <[email protected]>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/route.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter/xt_realm.h>
#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Sampsa Ranta <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: Routing realm match");
MODULE_ALIAS("ipt_realm");
static bool
realm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_realm_info *info = par->matchinfo;
const struct dst_entry *dst = skb_dst(skb);
return (info->id == (dst->tclassid & info->mask)) ^ info->invert;
}
static struct xt_match realm_mt_reg __read_mostly = {
.name = "realm",
.match = realm_mt,
.matchsize = sizeof(struct xt_realm_info),
.hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN),
.family = NFPROTO_UNSPEC,
.me = THIS_MODULE
};
static int __init realm_mt_init(void)
{
return xt_register_match(&realm_mt_reg);
}
static void __exit realm_mt_exit(void)
{
xt_unregister_match(&realm_mt_reg);
}
module_init(realm_mt_init);
module_exit(realm_mt_exit);
| linux-master | net/netfilter/xt_realm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2000-2001 Svenning Soerensen <[email protected]>
* Copyright (c) 2011 Patrick McHardy <[email protected]>
*/
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_nat.h>
static unsigned int
netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct nf_nat_range2 *range = par->targinfo;
struct nf_nat_range2 newrange;
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
union nf_inet_addr new_addr, netmask;
unsigned int i;
ct = nf_ct_get(skb, &ctinfo);
for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)
netmask.ip6[i] = ~(range->min_addr.ip6[i] ^
range->max_addr.ip6[i]);
if (xt_hooknum(par) == NF_INET_PRE_ROUTING ||
xt_hooknum(par) == NF_INET_LOCAL_OUT)
new_addr.in6 = ipv6_hdr(skb)->daddr;
else
new_addr.in6 = ipv6_hdr(skb)->saddr;
for (i = 0; i < ARRAY_SIZE(new_addr.ip6); i++) {
new_addr.ip6[i] &= ~netmask.ip6[i];
new_addr.ip6[i] |= range->min_addr.ip6[i] &
netmask.ip6[i];
}
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
newrange.min_addr = new_addr;
newrange.max_addr = new_addr;
newrange.min_proto = range->min_proto;
newrange.max_proto = range->max_proto;
return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(xt_hooknum(par)));
}
static int netmap_tg6_checkentry(const struct xt_tgchk_param *par)
{
const struct nf_nat_range2 *range = par->targinfo;
if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
return -EINVAL;
return nf_ct_netns_get(par->net, par->family);
}
static void netmap_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static unsigned int
netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
__be32 new_ip, netmask;
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
struct nf_nat_range2 newrange;
WARN_ON(xt_hooknum(par) != NF_INET_PRE_ROUTING &&
xt_hooknum(par) != NF_INET_POST_ROUTING &&
xt_hooknum(par) != NF_INET_LOCAL_OUT &&
xt_hooknum(par) != NF_INET_LOCAL_IN);
ct = nf_ct_get(skb, &ctinfo);
netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
if (xt_hooknum(par) == NF_INET_PRE_ROUTING ||
xt_hooknum(par) == NF_INET_LOCAL_OUT)
new_ip = ip_hdr(skb)->daddr & ~netmask;
else
new_ip = ip_hdr(skb)->saddr & ~netmask;
new_ip |= mr->range[0].min_ip & netmask;
memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
newrange.min_addr.ip = new_ip;
newrange.max_addr.ip = new_ip;
newrange.min_proto = mr->range[0].min;
newrange.max_proto = mr->range[0].max;
/* Hand modified range to generic setup. */
return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(xt_hooknum(par)));
}
static int netmap_tg4_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
pr_debug("bad MAP_IPS.\n");
return -EINVAL;
}
if (mr->rangesize != 1) {
pr_debug("bad rangesize %u.\n", mr->rangesize);
return -EINVAL;
}
return nf_ct_netns_get(par->net, par->family);
}
static struct xt_target netmap_tg_reg[] __read_mostly = {
{
.name = "NETMAP",
.family = NFPROTO_IPV6,
.revision = 0,
.target = netmap_tg6,
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
.checkentry = netmap_tg6_checkentry,
.destroy = netmap_tg_destroy,
.me = THIS_MODULE,
},
{
.name = "NETMAP",
.family = NFPROTO_IPV4,
.revision = 0,
.target = netmap_tg4,
.targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
.checkentry = netmap_tg4_check,
.destroy = netmap_tg_destroy,
.me = THIS_MODULE,
},
};
static int __init netmap_tg_init(void)
{
return xt_register_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
}
static void __exit netmap_tg_exit(void)
{
xt_unregister_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
}
module_init(netmap_tg_init);
module_exit(netmap_tg_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of subnets");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS("ip6t_NETMAP");
MODULE_ALIAS("ipt_NETMAP");
| linux-master | net/netfilter/xt_NETMAP.c |
// SPDX-License-Identifier: GPL-2.0-only
/* nf_nat_helper.c - generic support functions for NAT helpers
*
* (C) 2000-2002 Harald Welte <[email protected]>
* (C) 2003-2006 Netfilter Core Team <[email protected]>
* (C) 2007-2012 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/tcp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
/* Frobs data inside this packet, which is linear. */
static void mangle_contents(struct sk_buff *skb,
unsigned int dataoff,
unsigned int match_offset,
unsigned int match_len,
const char *rep_buffer,
unsigned int rep_len)
{
unsigned char *data;
SKB_LINEAR_ASSERT(skb);
data = skb_network_header(skb) + dataoff;
/* move post-replacement */
memmove(data + match_offset + rep_len,
data + match_offset + match_len,
skb_tail_pointer(skb) - (skb_network_header(skb) + dataoff +
match_offset + match_len));
/* insert data from buffer */
memcpy(data + match_offset, rep_buffer, rep_len);
/* update skb info */
if (rep_len > match_len) {
pr_debug("nf_nat_mangle_packet: Extending packet by "
"%u from %u bytes\n", rep_len - match_len, skb->len);
skb_put(skb, rep_len - match_len);
} else {
pr_debug("nf_nat_mangle_packet: Shrinking packet from "
"%u from %u bytes\n", match_len - rep_len, skb->len);
__skb_trim(skb, skb->len + rep_len - match_len);
}
if (nf_ct_l3num((struct nf_conn *)skb_nfct(skb)) == NFPROTO_IPV4) {
/* fix IP hdr checksum information */
ip_hdr(skb)->tot_len = htons(skb->len);
ip_send_check(ip_hdr(skb));
} else
ipv6_hdr(skb)->payload_len =
htons(skb->len - sizeof(struct ipv6hdr));
}
/* Unusual, but possible case. */
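/* 65535 is the largest datagram the 16-bit IP length fields can
* describe, so a mangled packet must never grow past it.
*/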
static bool enlarge_skb(struct sk_buff *skb, unsigned int extra)
{
if (skb->len + extra > 65535)
return false;
if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
return false;
return true;
}
/* Generic function for mangling variable-length address changes inside
* NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
* command in FTP).
*
* Takes care of all the nasty sequence number changes, checksumming,
* skb enlargement, ...
*/
bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned int match_offset,
unsigned int match_len,
const char *rep_buffer,
unsigned int rep_len, bool adjust)
{
struct tcphdr *tcph;
int oldlen, datalen;
if (skb_ensure_writable(skb, skb->len))
return false;
if (rep_len > match_len &&
rep_len - match_len > skb_tailroom(skb) &&
!enlarge_skb(skb, rep_len - match_len))
return false;
tcph = (void *)skb->data + protoff;
oldlen = skb->len - protoff;
mangle_contents(skb, protoff + tcph->doff*4,
match_offset, match_len, rep_buffer, rep_len);
datalen = skb->len - protoff;
nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_TCP,
tcph, &tcph->check, datalen, oldlen);
if (adjust && rep_len != match_len)
nf_ct_seqadj_set(ct, ctinfo, tcph->seq,
(int)rep_len - (int)match_len);
return true;
}
EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
/* Generic function for mangling variable-length address changes inside
* NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
* command in the Amanda protocol).
*
* Takes care of all the nasty checksumming and skb enlargement; UDP has
* no sequence numbers to adjust.
*
* XXX - This function could be merged with nf_nat_mangle_tcp_packet, which
* should be fairly easy to do.
*/
bool
nf_nat_mangle_udp_packet(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned int match_offset,
unsigned int match_len,
const char *rep_buffer,
unsigned int rep_len)
{
struct udphdr *udph;
int datalen, oldlen;
if (skb_ensure_writable(skb, skb->len))
return false;
if (rep_len > match_len &&
rep_len - match_len > skb_tailroom(skb) &&
!enlarge_skb(skb, rep_len - match_len))
return false;
udph = (void *)skb->data + protoff;
oldlen = skb->len - protoff;
mangle_contents(skb, protoff + sizeof(*udph),
match_offset, match_len, rep_buffer, rep_len);
/* update the length of the UDP packet */
datalen = skb->len - protoff;
udph->len = htons(datalen);
/* fix udp checksum if udp checksum was previously calculated */
if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
return true;
nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_UDP,
udph, &udph->check, datalen, oldlen);
return true;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
void nf_nat_follow_master(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
{
struct nf_nat_range2 range;
/* This must be a fresh one. */
BUG_ON(ct->status & IPS_NAT_DONE_MASK);
/* Change src to where master sends to */
range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_addr = range.max_addr
= ct->master->tuplehash[!exp->dir].tuple.dst.u3;
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
/* For DST manip, map port here to where it's expected. */
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min_proto = range.max_proto = exp->saved_proto;
range.min_addr = range.max_addr
= ct->master->tuplehash[!exp->dir].tuple.src.u3;
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
EXPORT_SYMBOL(nf_nat_follow_master);
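/* Reserve an expectation on the preferred port; on -EBUSY retry with up
* to 128 random ports between the preferred port and USHRT_MAX. Returns
* the port actually reserved, or 0 on failure.
*/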
u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port)
{
static const unsigned int max_attempts = 128;
int range, attempts_left;
u16 min = port;
range = USHRT_MAX - port;
attempts_left = range;
if (attempts_left > max_attempts)
attempts_left = max_attempts;
/* Try to get same port: if not, try to change it. */
for (;;) {
int res;
exp->tuple.dst.u.tcp.port = htons(port);
res = nf_ct_expect_related(exp, 0);
if (res == 0)
return port;
if (res != -EBUSY || (--attempts_left < 0))
break;
port = min + get_random_u32_below(range);
}
return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_exp_find_port);
| linux-master | net/netfilter/nf_nat_helper.c |