python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* xfrm4_output.c - Common IPsec encapsulation code for IPv4.
* Copyright (c) 2004 Herbert Xu <[email protected]>
*/
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
struct xfrm_state *x = skb_dst(skb)->xfrm;
if (!x) {
IPCB(skb)->flags |= IPSKB_REROUTED;
return dst_output(net, sk, skb);
}
#endif
return xfrm_output(sk, skb);
}
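/* NF_HOOK_COND() below traverses the NF_INET_POST_ROUTING chain only when
 * the final condition argument is true, i.e. while IPSKB_REROUTED is not
 * yet set; otherwise it calls __xfrm4_output() directly, so a packet that
 * has already been rerouted is not run through the hook a second time.
 */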
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
net, sk, skb, skb->dev, skb_dst(skb)->dev,
__xfrm4_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
{
struct iphdr *hdr;
hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
inet_sk(skb->sk)->inet_dport, mtu);
}
| linux-master | net/ipv4/xfrm4_output.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>
/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;
static u32 unsupported_ops[] = {
offsetof(struct tcp_congestion_ops, get_info),
};
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
static int bpf_tcp_ca_init(struct btf *btf)
{
s32 type_id;
type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
if (type_id < 0)
return -EINVAL;
sock_id = type_id;
type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
if (type_id < 0)
return -EINVAL;
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
return 0;
}
static bool is_unsupported(u32 member_offset)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
if (member_offset == unsupported_ops[i])
return true;
}
return false;
}
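/* If the verifier sees a trusted PTR_TO_BTF_ID argument of type struct sock,
 * is_valid_access() below promotes its BTF id to struct tcp_sock, so a bpf
 * congestion-control program can read tcp_sock fields (snd_cwnd etc.)
 * directly through its struct sock pointer.
 */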
static bool bpf_tcp_ca_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
return false;
if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
!bpf_type_has_unsafe_modifiers(info->reg_type) &&
info->btf_id == sock_id)
/* promote it to tcp_sock */
info->btf_id = tcp_sock_id;
return true;
}
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size)
{
const struct btf_type *t;
size_t end;
t = btf_type_by_id(reg->btf, reg->btf_id);
if (t != tcp_sock_type) {
bpf_log(log, "only read is supported\n");
return -EACCES;
}
switch (off) {
case offsetof(struct sock, sk_pacing_rate):
end = offsetofend(struct sock, sk_pacing_rate);
break;
case offsetof(struct sock, sk_pacing_status):
end = offsetofend(struct sock, sk_pacing_status);
break;
case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
break;
case offsetof(struct inet_connection_sock, icsk_ack.pending):
end = offsetofend(struct inet_connection_sock,
icsk_ack.pending);
break;
case offsetof(struct tcp_sock, snd_cwnd):
end = offsetofend(struct tcp_sock, snd_cwnd);
break;
case offsetof(struct tcp_sock, snd_cwnd_cnt):
end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
break;
case offsetof(struct tcp_sock, snd_ssthresh):
end = offsetofend(struct tcp_sock, snd_ssthresh);
break;
case offsetof(struct tcp_sock, ecn_flags):
end = offsetofend(struct tcp_sock, ecn_flags);
break;
case offsetof(struct tcp_sock, app_limited):
end = offsetofend(struct tcp_sock, app_limited);
break;
default:
bpf_log(log, "no write support to tcp_sock at off %d\n", off);
return -EACCES;
}
if (off + size > end) {
bpf_log(log,
"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
off, size, end);
return -EACCES;
}
return 0;
}
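/* The callback above is consulted for writes through a PTR_TO_BTF_ID:
 * plain reads are handled generically, while writes are limited to the
 * members listed in the switch and must not extend past the member's end.
 */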
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
/* bpf_tcp_ca prog cannot have NULL tp */
__tcp_send_ack((struct sock *)tp, rcv_nxt);
return 0;
}
static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
.func = bpf_tcp_send_ack,
.gpl_only = false,
/* In case we want to report error later */
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &tcp_sock_id,
.arg2_type = ARG_ANYTHING,
};
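/* For a struct_ops program, expected_attach_type holds the index of the
 * tcp_congestion_ops member being implemented; prog_ops_moff() below turns
 * that index into the member's byte offset, which get_func_proto() compares
 * against offsetof(..., release) to restrict what release() may call.
 */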
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
const struct btf_member *m;
const struct btf_type *t;
u32 midx;
midx = prog->expected_attach_type;
t = bpf_tcp_congestion_ops.type;
m = &btf_type_member(t)[midx];
return __btf_member_bit_offset(t, m) / 8;
}
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_tcp_send_ack:
return &bpf_tcp_send_ack_proto;
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_proto;
case BPF_FUNC_setsockopt:
/* Do not allow release() to call setsockopt().
* release() is called when the current bpf-tcp-cc
* is retiring; it must not call setsockopt() to
* make further changes, which could allocate new
* resources.
*/
if (prog_ops_moff(prog) !=
offsetof(struct tcp_congestion_ops, release))
return &bpf_sk_setsockopt_proto;
return NULL;
case BPF_FUNC_getsockopt:
/* Since getsockopt and setsockopt are usually expected
* to be available together, disable getsockopt for
* release() as well to avoid surprises.
* The bpf-tcp-cc already has a more powerful way
* to read tcp_sock from the PTR_TO_BTF_ID.
*/
if (prog_ops_moff(prog) !=
offsetof(struct tcp_congestion_ops, release))
return &bpf_sk_getsockopt_proto;
return NULL;
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
return bpf_base_func_proto(func_id);
}
}
BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)
static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_tcp_ca_check_kfunc_ids,
};
static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
.get_func_proto = bpf_tcp_ca_get_func_proto,
.is_valid_access = bpf_tcp_ca_is_valid_access,
.btf_struct_access = bpf_tcp_ca_btf_struct_access,
};
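/* In init_member() below, a positive return value tells the generic
 * struct_ops code that the member (flags, name) has been fully handled
 * here, zero defers to the default handling, and a negative value rejects
 * the update.
 */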
static int bpf_tcp_ca_init_member(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata)
{
const struct tcp_congestion_ops *utcp_ca;
struct tcp_congestion_ops *tcp_ca;
u32 moff;
utcp_ca = (const struct tcp_congestion_ops *)udata;
tcp_ca = (struct tcp_congestion_ops *)kdata;
moff = __btf_member_bit_offset(t, member) / 8;
switch (moff) {
case offsetof(struct tcp_congestion_ops, flags):
if (utcp_ca->flags & ~TCP_CONG_MASK)
return -EINVAL;
tcp_ca->flags = utcp_ca->flags;
return 1;
case offsetof(struct tcp_congestion_ops, name):
if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
sizeof(tcp_ca->name)) <= 0)
return -EINVAL;
return 1;
}
return 0;
}
static int bpf_tcp_ca_check_member(const struct btf_type *t,
const struct btf_member *member,
const struct bpf_prog *prog)
{
if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
return -ENOTSUPP;
return 0;
}
static int bpf_tcp_ca_reg(void *kdata)
{
return tcp_register_congestion_control(kdata);
}
static void bpf_tcp_ca_unreg(void *kdata)
{
tcp_unregister_congestion_control(kdata);
}
static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
{
return tcp_update_congestion_control(kdata, old_kdata);
}
static int bpf_tcp_ca_validate(void *kdata)
{
return tcp_validate_congestion_control(kdata);
}
struct bpf_struct_ops bpf_tcp_congestion_ops = {
.verifier_ops = &bpf_tcp_ca_verifier_ops,
.reg = bpf_tcp_ca_reg,
.unreg = bpf_tcp_ca_unreg,
.update = bpf_tcp_ca_update,
.check_member = bpf_tcp_ca_check_member,
.init_member = bpf_tcp_ca_init_member,
.init = bpf_tcp_ca_init,
.validate = bpf_tcp_ca_validate,
.name = "tcp_congestion_ops",
};
static int __init bpf_tcp_ca_kfunc_init(void)
{
return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
}
late_initcall(bpf_tcp_ca_kfunc_init);
| linux-master | net/ipv4/bpf_tcp_ca.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NET3: Implementation of the ICMP protocol layer.
*
* Alan Cox, <[email protected]>
*
* Some of the function names and the icmp unreach table for this
* module were derived from [icmp.c 1.0.11 06/02/93] by
* Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
* Other than that this module is a complete rewrite.
*
* Fixes:
* Clemens Fruhwirth : introduce global icmp rate limiting
* with icmp type masking ability instead
* of broken per type icmp timeouts.
* Mike Shaver : RFC1122 checks.
* Alan Cox : Multicast ping reply as self.
* Alan Cox : Fix atomicity lockup in ip_build_xmit
* call.
* Alan Cox : Added 216,128 byte paths to the MTU
* code.
* Martin Mares : RFC1812 checks.
* Martin Mares : Can be configured to follow redirects
* if acting as a router _without_ a
* routing protocol (RFC 1812).
* Martin Mares : Echo requests may be configured to
* be ignored (RFC 1812).
* Martin Mares : Limitation of ICMP error message
* transmit rate (RFC 1812).
* Martin Mares : TOS and Precedence set correctly
* (RFC 1812).
* Martin Mares : Now copying as much data from the
* original packet as we can without
* exceeding 576 bytes (RFC 1812).
* Willy Konynenberg : Transparent proxying support.
* Keith Owens : RFC1191 correction for 4.2BSD based
* path MTU bug.
* Thomas Quinot : ICMP Dest Unreach codes up to 15 are
* valid (RFC 1812).
* Andi Kleen : Check all packet lengths properly
* and moved all kfree_skb() up to
* icmp_rcv.
* Andi Kleen : Move the rate limit bookkeeping
* into the dest entry and use a token
* bucket filter (thanks to ANK). Make
* the rates sysctl configurable.
* Yu Tianli : Fixed two ugly bugs in icmp_send
* - IP option length was accounted wrongly
* - ICMP header length was not accounted
* at all.
* Tristan Greaves : Added sysctl option to ignore bogus
* broadcast responses from broken routers.
*
* To Fix:
*
* - Should use skb_pull() instead of all the manual checking.
* This would also greatly simplify some upper layer error handlers. --AK
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>
/*
* Build xmit assembly blocks
*/
struct icmp_bxm {
struct sk_buff *skb;
int offset;
int data_len;
struct {
struct icmphdr icmph;
__be32 times[3];
} data;
int head_len;
struct ip_options_data replyopts;
};
/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
const struct icmp_err icmp_err_convert[] = {
{
.errno = ENETUNREACH, /* ICMP_NET_UNREACH */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_UNREACH */
.fatal = 0,
},
{
.errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */,
.fatal = 1,
},
{
.errno = ECONNREFUSED, /* ICMP_PORT_UNREACH */
.fatal = 1,
},
{
.errno = EMSGSIZE, /* ICMP_FRAG_NEEDED */
.fatal = 0,
},
{
.errno = EOPNOTSUPP, /* ICMP_SR_FAILED */
.fatal = 0,
},
{
.errno = ENETUNREACH, /* ICMP_NET_UNKNOWN */
.fatal = 1,
},
{
.errno = EHOSTDOWN, /* ICMP_HOST_UNKNOWN */
.fatal = 1,
},
{
.errno = ENONET, /* ICMP_HOST_ISOLATED */
.fatal = 1,
},
{
.errno = ENETUNREACH, /* ICMP_NET_ANO */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_ANO */
.fatal = 1,
},
{
.errno = ENETUNREACH, /* ICMP_NET_UNR_TOS */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_PKT_FILTERED */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_PREC_VIOLATION */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_PREC_CUTOFF */
.fatal = 1,
},
};
EXPORT_SYMBOL(icmp_err_convert);
/*
* ICMP control array. This specifies what to do with each ICMP.
*/
struct icmp_control {
enum skb_drop_reason (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
static DEFINE_PER_CPU(struct sock *, ipv4_icmp_sk);
/* Called with BH disabled */
static inline struct sock *icmp_xmit_lock(struct net *net)
{
struct sock *sk;
sk = this_cpu_read(ipv4_icmp_sk);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path signals a
* dst_link_failure() for an outgoing ICMP packet.
*/
return NULL;
}
sock_net_set(sk, net);
return sk;
}
static inline void icmp_xmit_unlock(struct sock *sk)
{
sock_net_set(sk, &init_net);
spin_unlock(&sk->sk_lock.slock);
}
int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
int sysctl_icmp_msgs_burst __read_mostly = 50;
static struct {
spinlock_t lock;
u32 credit;
u32 stamp;
} icmp_global = {
.lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock),
};
/**
* icmp_global_allow - Are we allowed to send one more ICMP message?
*
* Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
* Returns false if we reached the limit and cannot send another packet.
* Note: called with BH disabled
*/
bool icmp_global_allow(void)
{
u32 credit, delta, incr = 0, now = (u32)jiffies;
bool rc = false;
/* Check if token bucket is empty and cannot be refilled
* without taking the spinlock. The READ_ONCE() calls are paired
* with the WRITE_ONCE() calls later in this same function.
*/
if (!READ_ONCE(icmp_global.credit)) {
delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
if (delta < HZ / 50)
return false;
}
spin_lock(&icmp_global.lock);
delta = min_t(u32, now - icmp_global.stamp, HZ);
if (delta >= HZ / 50) {
incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;
if (incr)
WRITE_ONCE(icmp_global.stamp, now);
}
credit = min_t(u32, icmp_global.credit + incr,
READ_ONCE(sysctl_icmp_msgs_burst));
if (credit) {
/* We want to use a credit of one on average, but need to randomize
* it for security reasons.
*/
credit = max_t(int, credit - get_random_u32_below(3), 0);
rc = true;
}
WRITE_ONCE(icmp_global.credit, credit);
spin_unlock(&icmp_global.lock);
return rc;
}
EXPORT_SYMBOL(icmp_global_allow);
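/* A rough worked example of the bucket above, assuming HZ == 1000 and the
 * default sysctls: after one idle second delta == HZ, so incr becomes
 * sysctl_icmp_msgs_per_sec * HZ / HZ == 1000 credits, immediately clamped
 * to sysctl_icmp_msgs_burst (50). Each successful call then consumes one
 * credit on average (0, 1 or 2 after the randomization), capping bursts at
 * about 50 packets and the sustained rate at about 1000 messages/sec.
 */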
static bool icmpv4_mask_allow(struct net *net, int type, int code)
{
if (type > NR_ICMP_TYPES)
return true;
/* Don't limit PMTU discovery. */
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
return true;
/* Limit if icmp type is enabled in ratemask. */
if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask)))
return true;
return false;
}
static bool icmpv4_global_allow(struct net *net, int type, int code)
{
if (icmpv4_mask_allow(net, type, code))
return true;
if (icmp_global_allow())
return true;
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
return false;
}
/*
* Send an ICMP frame.
*/
static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
struct flowi4 *fl4, int type, int code)
{
struct dst_entry *dst = &rt->dst;
struct inet_peer *peer;
bool rc = true;
int vif;
if (icmpv4_mask_allow(net, type, code))
goto out;
/* No rate limit on loopback */
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
goto out;
vif = l3mdev_master_ifindex(dst->dev);
peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
rc = inet_peer_xrlim_allow(peer,
READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
if (peer)
inet_putpeer(peer);
out:
if (!rc)
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
return rc;
}
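/* Outgoing ICMP rate limiting is therefore two-level: the global token
 * bucket in icmp_global_allow() plus the per-destination inet_peer bucket
 * in icmpv4_xrlim_allow() above, with loopback traffic and types whose bit
 * is clear in icmp_ratemask exempt from both.
 */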
/*
* Maintain the counters used in the SNMP statistics for outgoing ICMP
*/
void icmp_out_count(struct net *net, unsigned char type)
{
ICMPMSGOUT_INC_STATS(net, type);
ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}
/*
* Checksum each fragment, and on the first include the headers and final
* checksum.
*/
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb)
{
struct icmp_bxm *icmp_param = from;
__wsum csum;
csum = skb_copy_and_csum_bits(icmp_param->skb,
icmp_param->offset + offset,
to, len);
skb->csum = csum_block_add(skb->csum, csum, odd);
if (icmp_pointers[icmp_param->data.icmph.type].error)
nf_ct_attach(skb, icmp_param->skb);
return 0;
}
static void icmp_push_reply(struct sock *sk,
struct icmp_bxm *icmp_param,
struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rt)
{
struct sk_buff *skb;
if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
ipc, rt, MSG_DONTWAIT) < 0) {
__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
ip_flush_pending_frames(sk);
} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = icmp_hdr(skb);
__wsum csum;
struct sk_buff *skb1;
csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
(char *)icmph,
icmp_param->head_len);
skb_queue_walk(&sk->sk_write_queue, skb1) {
csum = csum_add(csum, skb1->csum);
}
icmph->checksum = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk, fl4);
}
}
/*
* Driving logic for building and sending ICMP messages.
*/
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
struct ipcm_cookie ipc;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
struct flowi4 fl4;
struct sock *sk;
struct inet_sock *inet;
__be32 daddr, saddr;
u32 mark = IP4_REPLY_MARK(net, skb->mark);
int type = icmp_param->data.icmph.type;
int code = icmp_param->data.icmph.code;
if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
return;
/* Needed by both icmp_global_allow and icmp_xmit_lock */
local_bh_disable();
/* global icmp_msgs_per_sec */
if (!icmpv4_global_allow(net, type, code))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
if (!sk)
goto out_bh_enable;
inet = inet_sk(sk);
icmp_param->data.icmph.checksum = 0;
ipcm_init(&ipc);
inet->tos = ip_hdr(skb)->tos;
ipc.sockc.mark = mark;
daddr = ipc.addr = ip_hdr(skb)->saddr;
saddr = fib_compute_spec_dst(skb);
if (icmp_param->replyopts.opt.opt.optlen) {
ipc.opt = &icmp_param->replyopts.opt;
if (ipc.opt->opt.srr)
daddr = icmp_param->replyopts.opt.opt.faddr;
}
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
fl4.saddr = saddr;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = sock_net_uid(net, NULL);
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
goto out_unlock;
if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt);
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
out_bh_enable:
local_bh_enable();
}
/*
* The device used for looking up which routing table to use for sending an ICMP
* error is preferably the source device, whenever it is set, which should ensure
* that the ICMP error can be sent back to the source host; otherwise, look it up
* using the routing table of the destination device, and failing that use the
* main routing table (index 0).
*/
static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
{
struct net_device *route_lookup_dev = NULL;
if (skb->dev)
route_lookup_dev = skb->dev;
else if (skb_dst(skb))
route_lookup_dev = skb_dst(skb)->dev;
return route_lookup_dev;
}
static struct rtable *icmp_route_lookup(struct net *net,
struct flowi4 *fl4,
struct sk_buff *skb_in,
const struct iphdr *iph,
__be32 saddr, u8 tos, u32 mark,
int type, int code,
struct icmp_bxm *param)
{
struct net_device *route_lookup_dev;
struct rtable *rt, *rt2;
struct flowi4 fl4_dec;
int err;
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = (param->replyopts.opt.opt.srr ?
param->replyopts.opt.opt.faddr : iph->saddr);
fl4->saddr = saddr;
fl4->flowi4_mark = mark;
fl4->flowi4_uid = sock_net_uid(net, NULL);
fl4->flowi4_tos = RT_TOS(tos);
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
route_lookup_dev = icmp_get_route_lookup_dev(skb_in);
fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev);
security_skb_classify_flow(skb_in, flowi4_to_flowi_common(fl4));
rt = ip_route_output_key_hash(net, fl4, skb_in);
if (IS_ERR(rt))
return rt;
/* No need to clone since we're just using its address. */
rt2 = rt;
rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
flowi4_to_flowi(fl4), NULL, 0);
if (!IS_ERR(rt)) {
if (rt != rt2)
return rt;
} else if (PTR_ERR(rt) == -EPERM) {
rt = NULL;
} else
return rt;
err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
if (err)
goto relookup_failed;
if (inet_addr_type_dev_table(net, route_lookup_dev,
fl4_dec.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4_dec);
if (IS_ERR(rt2))
err = PTR_ERR(rt2);
} else {
struct flowi4 fl4_2 = {};
unsigned long orefdst;
fl4_2.daddr = fl4_dec.saddr;
rt2 = ip_route_output_key(net, &fl4_2);
if (IS_ERR(rt2)) {
err = PTR_ERR(rt2);
goto relookup_failed;
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
skb_dst_set(skb_in, NULL);
err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
RT_TOS(tos), rt2->dst.dev);
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
if (err)
goto relookup_failed;
rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
flowi4_to_flowi(&fl4_dec), NULL,
XFRM_LOOKUP_ICMP);
if (!IS_ERR(rt2)) {
dst_release(&rt->dst);
memcpy(fl4, &fl4_dec, sizeof(*fl4));
rt = rt2;
} else if (PTR_ERR(rt2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
err = PTR_ERR(rt2);
goto relookup_failed;
}
return rt;
relookup_failed:
if (rt)
return rt;
return ERR_PTR(err);
}
/*
* Send an ICMP message in response to a situation
*
* RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header.
* MAY send more (we do).
* MUST NOT change this header information.
* MUST NOT reply to a multicast/broadcast IP address.
* MUST NOT reply to a multicast/broadcast MAC address.
* MUST reply to only the first fragment.
*/
void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
const struct ip_options *opt)
{
struct iphdr *iph;
int room;
struct icmp_bxm icmp_param;
struct rtable *rt = skb_rtable(skb_in);
struct ipcm_cookie ipc;
struct flowi4 fl4;
__be32 saddr;
u8 tos;
u32 mark;
struct net *net;
struct sock *sk;
if (!rt)
goto out;
if (rt->dst.dev)
net = dev_net(rt->dst.dev);
else if (skb_in->dev)
net = dev_net(skb_in->dev);
else
goto out;
/*
* Find the original header. It is expected to be valid, of course.
* Check it anyway; icmp_send is sometimes called from the most
* obscure devices.
*/
iph = ip_hdr(skb_in);
if ((u8 *)iph < skb_in->head ||
(skb_network_header(skb_in) + sizeof(*iph)) >
skb_tail_pointer(skb_in))
goto out;
/*
* No replies to physical multicast/broadcast
*/
if (skb_in->pkt_type != PACKET_HOST)
goto out;
/*
* Now check at the protocol level
*/
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto out;
/*
* Only reply to fragment 0. We byte re-order the constant
* mask for efficiency.
*/
if (iph->frag_off & htons(IP_OFFSET))
goto out;
/*
* If we send an ICMP error to an ICMP error a mess would result..
*/
if (icmp_pointers[type].error) {
/*
* We are an error, check if we are replying to an
* ICMP error
*/
if (iph->protocol == IPPROTO_ICMP) {
u8 _inner_type, *itp;
itp = skb_header_pointer(skb_in,
skb_network_header(skb_in) +
(iph->ihl << 2) +
offsetof(struct icmphdr,
type) -
skb_in->data,
sizeof(_inner_type),
&_inner_type);
if (!itp)
goto out;
/*
* Assume any unknown ICMP type is an error. This
* isn't specified by the RFC, but think about it..
*/
if (*itp > NR_ICMP_TYPES ||
icmp_pointers[*itp].error)
goto out;
}
}
/* Needed by both icmp_global_allow and icmp_xmit_lock */
local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
* incoming dev is loopback. If the outgoing dev changes to a
* non-loopback device, the peer ratelimit still applies (in icmpv4_xrlim_allow).
*/
if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
!icmpv4_global_allow(net, type, code))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
if (!sk)
goto out_bh_enable;
/*
* Construct source address and options.
*/
saddr = iph->daddr;
if (!(rt->rt_flags & RTCF_LOCAL)) {
struct net_device *dev = NULL;
rcu_read_lock();
if (rt_is_input_route(rt) &&
READ_ONCE(net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr))
dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
if (dev)
saddr = inet_select_addr(dev, iph->saddr,
RT_SCOPE_LINK);
else
saddr = 0;
rcu_read_unlock();
}
tos = icmp_pointers[type].error ? (RT_TOS(iph->tos) |
IPTOS_PREC_INTERNETCONTROL) :
iph->tos;
mark = IP4_REPLY_MARK(net, skb_in->mark);
if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
goto out_unlock;
/*
* Prepare data for ICMP header.
*/
icmp_param.data.icmph.type = type;
icmp_param.data.icmph.code = code;
icmp_param.data.icmph.un.gateway = info;
icmp_param.data.icmph.checksum = 0;
icmp_param.skb = skb_in;
icmp_param.offset = skb_network_offset(skb_in);
inet_sk(sk)->tos = tos;
ipcm_init(&ipc);
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts.opt;
ipc.sockc.mark = mark;
rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
type, code, &icmp_param);
if (IS_ERR(rt))
goto out_unlock;
/* peer icmp_ratelimit */
if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
goto ende;
/* RFC says return as much as we can without exceeding 576 bytes. */
room = dst_mtu(&rt->dst);
if (room > 576)
room = 576;
room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
room -= sizeof(struct icmphdr);
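/* e.g. with a 1500 byte MTU and no IP options this leaves
 * 576 - 20 - 8 = 548 bytes of the offending datagram to quote back.
 */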
/* Guard against tiny mtu. We need to include at least one
* IP network header for this message to make any sense.
*/
if (room <= (int)sizeof(struct iphdr))
goto ende;
icmp_param.data_len = skb_in->len - icmp_param.offset;
if (icmp_param.data_len > room)
icmp_param.data_len = room;
icmp_param.head_len = sizeof(struct icmphdr);
/* if we don't have a source address at this point, fall back to the
* dummy address instead of sending out a packet with a source address
* of 0.0.0.0
*/
if (!fl4.saddr)
fl4.saddr = htonl(INADDR_DUMMY);
icmp_push_reply(sk, &icmp_param, &fl4, &ipc, &rt);
ende:
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
out_bh_enable:
local_bh_enable();
out:;
}
EXPORT_SYMBOL(__icmp_send);
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_conntrack.h>
void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
struct sk_buff *cloned_skb = NULL;
struct ip_options opts = { 0 };
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
__be32 orig_ip;
ct = nf_ct_get(skb_in, &ctinfo);
if (!ct || !(ct->status & IPS_SRC_NAT)) {
__icmp_send(skb_in, type, code, info, &opts);
return;
}
if (skb_shared(skb_in))
skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
(skb_network_header(skb_in) + sizeof(struct iphdr)) >
skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
skb_network_offset(skb_in) + sizeof(struct iphdr))))
goto out;
orig_ip = ip_hdr(skb_in)->saddr;
ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
__icmp_send(skb_in, type, code, info, &opts);
ip_hdr(skb_in)->saddr = orig_ip;
out:
consume_skb(cloned_skb);
}
EXPORT_SYMBOL(icmp_ndo_send);
#endif
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
const struct net_protocol *ipprot;
int protocol = iph->protocol;
/* Check that the full IP header plus 8 bytes of the protocol are
* present, to avoid additional checks in the protocol handlers.
*/
if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
return;
}
raw_icmp_error(skb, protocol, info);
ipprot = rcu_dereference(inet_protos[protocol]);
if (ipprot && ipprot->err_handler)
ipprot->err_handler(skb, info);
}
static bool icmp_tag_validation(int proto)
{
bool ok;
rcu_read_lock();
ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation;
rcu_read_unlock();
return ok;
}
/*
* Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_SOURCE_QUENCH, and
* ICMP_PARAMETERPROB.
*/
static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
{
enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
const struct iphdr *iph;
struct icmphdr *icmph;
struct net *net;
u32 info = 0;
net = dev_net(skb_dst(skb)->dev);
/*
* Incomplete header?
* Only checks for the IP header; there should be an
* additional check for longer headers at upper levels.
*/
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out_err;
icmph = icmp_hdr(skb);
iph = (const struct iphdr *)skb->data;
if (iph->ihl < 5) { /* Mangled header, drop. */
reason = SKB_DROP_REASON_IP_INHDR;
goto out_err;
}
switch (icmph->type) {
case ICMP_DEST_UNREACH:
switch (icmph->code & 15) {
case ICMP_NET_UNREACH:
case ICMP_HOST_UNREACH:
case ICMP_PROT_UNREACH:
case ICMP_PORT_UNREACH:
break;
case ICMP_FRAG_NEEDED:
/* for documentation of the ip_no_pmtu_disc
* values please see
* Documentation/networking/ip-sysctl.rst
*/
switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) {
default:
net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
&iph->daddr);
break;
case 2:
goto out;
case 3:
if (!icmp_tag_validation(iph->protocol))
goto out;
fallthrough;
case 0:
info = ntohs(icmph->un.frag.mtu);
}
break;
case ICMP_SR_FAILED:
net_dbg_ratelimited("%pI4: Source Route Failed\n",
&iph->daddr);
break;
default:
break;
}
if (icmph->code > NR_ICMP_UNREACH)
goto out;
break;
case ICMP_PARAMETERPROB:
info = ntohl(icmph->un.gateway) >> 24;
break;
case ICMP_TIME_EXCEEDED:
__ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
if (icmph->code == ICMP_EXC_FRAGTIME)
goto out;
break;
}
/*
* Throw it at our lower layers
*
* RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
* header.
* RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
* transport layer.
* RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
* transport layer.
*/
/*
* Check the other end isn't violating RFC 1122. Some routers send
* bogus responses to broadcast frames. If you see this message,
* first check that your netmask matches at both ends; if it does,
* then get the other vendor to fix their kit.
*/
if (!READ_ONCE(net->ipv4.sysctl_icmp_ignore_bogus_error_responses) &&
inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
&ip_hdr(skb)->saddr,
icmph->type, icmph->code,
&iph->daddr, skb->dev->name);
goto out;
}
icmp_socket_deliver(skb, info);
out:
return reason;
out_err:
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return reason ?: SKB_DROP_REASON_NOT_SPECIFIED;
}
/*
* Handle ICMP_REDIRECT.
*/
static enum skb_drop_reason icmp_redirect(struct sk_buff *skb)
{
if (skb->len < sizeof(struct iphdr)) {
__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
return SKB_DROP_REASON_PKT_TOO_SMALL;
}
if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
/* there ought to be a stat */
return SKB_DROP_REASON_NOMEM;
}
icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
return SKB_NOT_DROPPED_YET;
}
/*
* Handle ICMP_ECHO ("ping") and ICMP_EXT_ECHO ("PROBE") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
* requests.
* RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
* included in the reply.
* RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
* echo requests, MUST have default=NOT.
* RFC 8335: 8 MUST have a config option to enable/disable ICMP
* Extended Echo Functionality, MUST be disabled by default
* See also WRT handling of options once they are done and working.
*/
static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
{
struct icmp_bxm icmp_param;
struct net *net;
net = dev_net(skb_dst(skb)->dev);
/* should there be an ICMP stat for ignored echos? */
if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
return SKB_NOT_DROPPED_YET;
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
if (icmp_param.data.icmph.type == ICMP_ECHO)
icmp_param.data.icmph.type = ICMP_ECHOREPLY;
else if (!icmp_build_probe(skb, &icmp_param.data.icmph))
return SKB_NOT_DROPPED_YET;
icmp_reply(&icmp_param, skb);
return SKB_NOT_DROPPED_YET;
}
/* Helper for icmp_echo and icmpv6_echo_reply.
* Searches for net_device that matches PROBE interface identifier
* and builds PROBE reply message in icmphdr.
*
* Returns false if PROBE responses are disabled via sysctl
*/
bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
{
struct icmp_ext_hdr *ext_hdr, _ext_hdr;
struct icmp_ext_echo_iio *iio, _iio;
struct net *net = dev_net(skb->dev);
struct net_device *dev;
char buff[IFNAMSIZ];
u16 ident_len;
u8 status;
if (!READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))
return false;
/* We currently only support probing interfaces on the proxy node.
* Check to ensure the L-bit is set.
*/
if (!(ntohs(icmphdr->un.echo.sequence) & 1))
return false;
/* Clear status bits in reply message */
icmphdr->un.echo.sequence &= htons(0xFF00);
if (icmphdr->type == ICMP_EXT_ECHO)
icmphdr->type = ICMP_EXT_ECHOREPLY;
else
icmphdr->type = ICMPV6_EXT_ECHO_REPLY;
ext_hdr = skb_header_pointer(skb, 0, sizeof(_ext_hdr), &_ext_hdr);
/* Size of iio is class_type dependent.
* Only check the header here and assign the length based on ctype in the switch statement below.
*/
iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio);
if (!ext_hdr || !iio)
goto send_mal_query;
if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) ||
ntohs(iio->extobj_hdr.length) > sizeof(_iio))
goto send_mal_query;
ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr);
iio = skb_header_pointer(skb, sizeof(_ext_hdr),
sizeof(iio->extobj_hdr) + ident_len, &_iio);
if (!iio)
goto send_mal_query;
status = 0;
dev = NULL;
switch (iio->extobj_hdr.class_type) {
case ICMP_EXT_ECHO_CTYPE_NAME:
if (ident_len >= IFNAMSIZ)
goto send_mal_query;
memset(buff, 0, sizeof(buff));
memcpy(buff, &iio->ident.name, ident_len);
dev = dev_get_by_name(net, buff);
break;
case ICMP_EXT_ECHO_CTYPE_INDEX:
if (ident_len != sizeof(iio->ident.ifindex))
goto send_mal_query;
dev = dev_get_by_index(net, ntohl(iio->ident.ifindex));
break;
case ICMP_EXT_ECHO_CTYPE_ADDR:
if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) ||
ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
iio->ident.addr.ctype3_hdr.addrlen)
goto send_mal_query;
switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) {
case ICMP_AFI_IP:
if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in_addr))
goto send_mal_query;
dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case ICMP_AFI_IP6:
if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr))
goto send_mal_query;
dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev);
dev_hold(dev);
break;
#endif
default:
goto send_mal_query;
}
break;
default:
goto send_mal_query;
}
if (!dev) {
icmphdr->code = ICMP_EXT_CODE_NO_IF;
return true;
}
/* Fill bits in reply message */
if (dev->flags & IFF_UP)
status |= ICMP_EXT_ECHOREPLY_ACTIVE;
if (__in_dev_get_rcu(dev) && __in_dev_get_rcu(dev)->ifa_list)
status |= ICMP_EXT_ECHOREPLY_IPV4;
if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
status |= ICMP_EXT_ECHOREPLY_IPV6;
dev_put(dev);
icmphdr->un.echo.sequence |= htons(status);
return true;
send_mal_query:
icmphdr->code = ICMP_EXT_CODE_MAL_QUERY;
return true;
}
EXPORT_SYMBOL_GPL(icmp_build_probe);
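/* In the reply built above, the low byte of the echo sequence field carries
 * the state: the request's L-bit is tested, the byte is cleared, and the
 * ICMP_EXT_ECHOREPLY_ACTIVE/IPV4/IPV6 status bits are OR-ed back in.
 */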
/*
* Handle ICMP Timestamp requests.
* RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
* SHOULD be in the kernel for minimum random latency.
* MUST be accurate to a few minutes.
* MUST be updated at least at 15Hz.
*/
static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
{
struct icmp_bxm icmp_param;
/*
* Too short.
*/
if (skb->len < 4)
goto out_err;
/*
* Fill in the current time as ms since midnight UT:
*/
icmp_param.data.times[1] = inet_current_timestamp();
icmp_param.data.times[2] = icmp_param.data.times[1];
BUG_ON(skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4));
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
icmp_param.data.icmph.code = 0;
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = 0;
icmp_param.head_len = sizeof(struct icmphdr) + 12;
icmp_reply(&icmp_param, skb);
return SKB_NOT_DROPPED_YET;
out_err:
__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
return SKB_DROP_REASON_PKT_TOO_SMALL;
}
static enum skb_drop_reason icmp_discard(struct sk_buff *skb)
{
/* pretend it was a success */
return SKB_NOT_DROPPED_YET;
}
/*
* Deal with incoming ICMP packets.
*/
int icmp_rcv(struct sk_buff *skb)
{
enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
struct icmphdr *icmph;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
int nh;
if (!(sp && sp->xvec[sp->len - 1]->props.flags &
XFRM_STATE_ICMP)) {
reason = SKB_DROP_REASON_XFRM_POLICY;
goto drop;
}
if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
goto drop;
nh = skb_network_offset(skb);
skb_set_network_header(skb, sizeof(*icmph));
if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN,
skb)) {
reason = SKB_DROP_REASON_XFRM_POLICY;
goto drop;
}
skb_set_network_header(skb, nh);
}
__ICMP_INC_STATS(net, ICMP_MIB_INMSGS);
if (skb_checksum_simple_validate(skb))
goto csum_error;
if (!pskb_pull(skb, sizeof(*icmph)))
goto error;
icmph = icmp_hdr(skb);
ICMPMSGIN_INC_STATS(net, icmph->type);
/* Check for ICMP Extended Echo (PROBE) messages */
if (icmph->type == ICMP_EXT_ECHO) {
/* We can't use icmp_pointers[].handler() because it is an array of
* size NR_ICMP_TYPES + 1 (19 elements) and PROBE has type 42.
*/
reason = icmp_echo(skb);
goto reason_check;
}
if (icmph->type == ICMP_EXT_ECHOREPLY) {
reason = ping_rcv(skb);
goto reason_check;
}
/*
* 18 is the highest 'known' ICMP type. Anything else is a mystery
*
* RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently
* discarded.
*/
if (icmph->type > NR_ICMP_TYPES) {
reason = SKB_DROP_REASON_UNHANDLED_PROTO;
goto error;
}
/*
* Parse the ICMP message
*/
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
/*
* RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
* silently ignored (we let user decide with a sysctl).
* RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
* discarded if to broadcast/multicast.
*/
if ((icmph->type == ICMP_ECHO ||
icmph->type == ICMP_TIMESTAMP) &&
READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_broadcasts)) {
reason = SKB_DROP_REASON_INVALID_PROTO;
goto error;
}
if (icmph->type != ICMP_ECHO &&
icmph->type != ICMP_TIMESTAMP &&
icmph->type != ICMP_ADDRESS &&
icmph->type != ICMP_ADDRESSREPLY) {
reason = SKB_DROP_REASON_INVALID_PROTO;
goto error;
}
}
reason = icmp_pointers[icmph->type].handler(skb);
reason_check:
if (!reason) {
consume_skb(skb);
return NET_RX_SUCCESS;
}
drop:
kfree_skb_reason(skb, reason);
return NET_RX_DROP;
csum_error:
reason = SKB_DROP_REASON_ICMP_CSUM;
__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
error:
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
goto drop;
}
static bool ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off)
{
struct icmp_extobj_hdr *objh, _objh;
struct icmp_ext_hdr *exth, _exth;
u16 olen;
exth = skb_header_pointer(skb, off, sizeof(_exth), &_exth);
if (!exth)
return false;
if (exth->version != 2)
return true;
if (exth->checksum &&
csum_fold(skb_checksum(skb, off, skb->len - off, 0)))
return false;
off += sizeof(_exth);
while (off < skb->len) {
objh = skb_header_pointer(skb, off, sizeof(_objh), &_objh);
if (!objh)
return false;
olen = ntohs(objh->length);
if (olen < sizeof(_objh))
return false;
off += olen;
if (off > skb->len)
return false;
}
return true;
}
void ip_icmp_error_rfc4884(const struct sk_buff *skb,
struct sock_ee_data_rfc4884 *out,
int thlen, int off)
{
int hlen;
/* original datagram headers: end of icmph to payload (skb->data) */
hlen = -skb_transport_offset(skb) - thlen;
/* per rfc 4884: minimal datagram length of 128 bytes */
if (off < 128 || off < hlen)
return;
/* kernel has stripped headers: return payload offset in bytes */
off -= hlen;
if (off + sizeof(struct icmp_ext_hdr) > skb->len)
return;
out->len = off;
if (!ip_icmp_error_rfc4884_validate(skb, off))
out->flags |= SO_EE_RFC4884_FLAG_INVALID;
}
EXPORT_SYMBOL_GPL(ip_icmp_error_rfc4884);
int icmp_err(struct sk_buff *skb, u32 info)
{
struct iphdr *iph = (struct iphdr *)skb->data;
int offset = iph->ihl<<2;
struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
int type = icmp_hdr(skb)->type;
int code = icmp_hdr(skb)->code;
struct net *net = dev_net(skb->dev);
/*
* Use ping_err to handle all icmp errors except those
* triggered by ICMP_ECHOREPLY, which is sent from the kernel.
*/
if (icmph->type != ICMP_ECHOREPLY) {
ping_err(skb, offset, info);
return 0;
}
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
else if (type == ICMP_REDIRECT)
ipv4_redirect(skb, net, 0, IPPROTO_ICMP);
return 0;
}
/*
* This table is the definition of how we handle ICMP.
*/
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
[ICMP_ECHOREPLY] = {
.handler = ping_rcv,
},
[1] = {
.handler = icmp_discard,
.error = 1,
},
[2] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_DEST_UNREACH] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_SOURCE_QUENCH] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_REDIRECT] = {
.handler = icmp_redirect,
.error = 1,
},
[6] = {
.handler = icmp_discard,
.error = 1,
},
[7] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_ECHO] = {
.handler = icmp_echo,
},
[9] = {
.handler = icmp_discard,
.error = 1,
},
[10] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_TIME_EXCEEDED] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_PARAMETERPROB] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_TIMESTAMP] = {
.handler = icmp_timestamp,
},
[ICMP_TIMESTAMPREPLY] = {
.handler = icmp_discard,
},
[ICMP_INFO_REQUEST] = {
.handler = icmp_discard,
},
[ICMP_INFO_REPLY] = {
.handler = icmp_discard,
},
[ICMP_ADDRESS] = {
.handler = icmp_discard,
},
[ICMP_ADDRESSREPLY] = {
.handler = icmp_discard,
},
};
static int __net_init icmp_sk_init(struct net *net)
{
/* Control parameters for ECHO replies. */
net->ipv4.sysctl_icmp_echo_ignore_all = 0;
net->ipv4.sysctl_icmp_echo_enable_probe = 0;
net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;
/* Control parameter - ignore bogus broadcast responses? */
net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;
/*
* Configurable global rate limit.
*
* ratelimit defines tokens/packet consumed for the dst->rate_token
* bucket; ratemask defines which icmp types are ratelimited by
* setting its bit position.
*
* default:
* dest unreachable (3), source quench (4),
* time exceeded (11), parameter problem (12)
*/
net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
net->ipv4.sysctl_icmp_ratemask = 0x1818;
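/* 0x1818 == (1 << 3) | (1 << 4) | (1 << 11) | (1 << 12), i.e. exactly the
 * four default types listed in the comment above.
 */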
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
return 0;
}
static struct pernet_operations __net_initdata icmp_sk_ops = {
.init = icmp_sk_init,
};
int __init icmp_init(void)
{
int err, i;
for_each_possible_cpu(i) {
struct sock *sk;
err = inet_ctl_sock_create(&sk, PF_INET,
SOCK_RAW, IPPROTO_ICMP, &init_net);
if (err < 0)
return err;
per_cpu(ipv4_icmp_sk, i) = sk;
/* Enough space for 2 64K ICMP packets, including
* sk_buff/skb_shared_info struct overhead.
*/
sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
/*
* Speedup sock_wfree()
*/
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
}
return register_pernet_subsys(&icmp_sk_ops);
}
| linux-master | net/ipv4/icmp.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Facebook Inc.
#include <net/udp_tunnel.h>
const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
EXPORT_SYMBOL_GPL(udp_tunnel_nic_ops);
| linux-master | net/ipv4/udp_tunnel_stub.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* This file implements the various access functions for the
* PROC file system. It is mainly used for debugging and
* statistics.
*
* Authors: Fred N. van Kempen, <[email protected]>
* Gerald J. Heim, <[email protected]>
* Fred Baumgarten, <[email protected]>
* Erik Schoenfelder, <[email protected]>
*
* Fixes:
* Alan Cox : UDP sockets show the rxqueue/txqueue
* using hint flag for the netinfo.
* Pauline Middelink : identd support
* Alan Cox : Make /proc safer.
* Erik Schoenfelder : /proc/net/snmp
* Alan Cox : Handle dead sockets properly.
* Gerhard Koerting : Show both timers
* Alan Cox : Allow inode to be NULL (kernel socket)
* Andi Kleen : Add support for open_requests and
* split functions for more readability.
* Andi Kleen : Add support for /proc/net/netstat
* Arnaldo C. Melo : Convert to seq_file
*/
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/bottom_half.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/raw.h>
#define TCPUDP_MIB_MAX max_t(u32, UDP_MIB_MAX, TCP_MIB_MAX)
/*
* Report socket allocation statistics [[email protected]]
*/
static int sockstat_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
int orphans, sockets;
orphans = tcp_orphan_count_sum();
sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
socket_seq_show(seq);
seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
sock_prot_inuse_get(net, &tcp_prot), orphans,
refcount_read(&net->ipv4.tcp_death_row.tw_refcount) - 1,
sockets, proto_memory_allocated(&tcp_prot));
seq_printf(seq, "UDP: inuse %d mem %ld\n",
sock_prot_inuse_get(net, &udp_prot),
proto_memory_allocated(&udp_prot));
seq_printf(seq, "UDPLITE: inuse %d\n",
sock_prot_inuse_get(net, &udplite_prot));
seq_printf(seq, "RAW: inuse %d\n",
sock_prot_inuse_get(net, &raw_prot));
seq_printf(seq, "FRAG: inuse %u memory %lu\n",
atomic_read(&net->ipv4.fqdir->rhashtable.nelems),
frag_mem_limit(net->ipv4.fqdir));
return 0;
}
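/* Illustrative /proc/net/sockstat output produced by the function above
 * (values are made up):
 *	sockets: used 231
 *	TCP: inuse 7 orphan 0 tw 2 alloc 9 mem 3
 *	UDP: inuse 3 mem 2
 *	UDPLITE: inuse 0
 *	RAW: inuse 0
 *	FRAG: inuse 0 memory 0
 */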
/* snmp items */
static const struct snmp_mib snmp4_ipstats_list[] = {
SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS),
SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS),
SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS),
SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS),
SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS),
SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTPKTS),
SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS),
SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
SNMP_MIB_ITEM("ReasmReqds", IPSTATS_MIB_REASMREQDS),
SNMP_MIB_ITEM("ReasmOKs", IPSTATS_MIB_REASMOKS),
SNMP_MIB_ITEM("ReasmFails", IPSTATS_MIB_REASMFAILS),
SNMP_MIB_ITEM("FragOKs", IPSTATS_MIB_FRAGOKS),
SNMP_MIB_ITEM("FragFails", IPSTATS_MIB_FRAGFAILS),
SNMP_MIB_ITEM("FragCreates", IPSTATS_MIB_FRAGCREATES),
SNMP_MIB_SENTINEL
};
/* Following items are displayed in /proc/net/netstat */
static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES),
SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
SNMP_MIB_ITEM("InMcastPkts", IPSTATS_MIB_INMCASTPKTS),
SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS),
SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS),
SNMP_MIB_ITEM("InOctets", IPSTATS_MIB_INOCTETS),
SNMP_MIB_ITEM("OutOctets", IPSTATS_MIB_OUTOCTETS),
SNMP_MIB_ITEM("InMcastOctets", IPSTATS_MIB_INMCASTOCTETS),
SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
/* Non RFC4293 fields */
SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
SNMP_MIB_SENTINEL
};
static const struct {
const char *name;
int index;
} icmpmibmap[] = {
{ "DestUnreachs", ICMP_DEST_UNREACH },
{ "TimeExcds", ICMP_TIME_EXCEEDED },
{ "ParmProbs", ICMP_PARAMETERPROB },
{ "SrcQuenchs", ICMP_SOURCE_QUENCH },
{ "Redirects", ICMP_REDIRECT },
{ "Echos", ICMP_ECHO },
{ "EchoReps", ICMP_ECHOREPLY },
{ "Timestamps", ICMP_TIMESTAMP },
{ "TimestampReps", ICMP_TIMESTAMPREPLY },
{ "AddrMasks", ICMP_ADDRESS },
{ "AddrMaskReps", ICMP_ADDRESSREPLY },
{ NULL, 0 }
};
static const struct snmp_mib snmp4_tcp_list[] = {
SNMP_MIB_ITEM("RtoAlgorithm", TCP_MIB_RTOALGORITHM),
SNMP_MIB_ITEM("RtoMin", TCP_MIB_RTOMIN),
SNMP_MIB_ITEM("RtoMax", TCP_MIB_RTOMAX),
SNMP_MIB_ITEM("MaxConn", TCP_MIB_MAXCONN),
SNMP_MIB_ITEM("ActiveOpens", TCP_MIB_ACTIVEOPENS),
SNMP_MIB_ITEM("PassiveOpens", TCP_MIB_PASSIVEOPENS),
SNMP_MIB_ITEM("AttemptFails", TCP_MIB_ATTEMPTFAILS),
SNMP_MIB_ITEM("EstabResets", TCP_MIB_ESTABRESETS),
SNMP_MIB_ITEM("CurrEstab", TCP_MIB_CURRESTAB),
SNMP_MIB_ITEM("InSegs", TCP_MIB_INSEGS),
SNMP_MIB_ITEM("OutSegs", TCP_MIB_OUTSEGS),
SNMP_MIB_ITEM("RetransSegs", TCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("InErrs", TCP_MIB_INERRS),
SNMP_MIB_ITEM("OutRsts", TCP_MIB_OUTRSTS),
SNMP_MIB_ITEM("InCsumErrors", TCP_MIB_CSUMERRORS),
SNMP_MIB_SENTINEL
};
static const struct snmp_mib snmp4_udp_list[] = {
SNMP_MIB_ITEM("InDatagrams", UDP_MIB_INDATAGRAMS),
SNMP_MIB_ITEM("NoPorts", UDP_MIB_NOPORTS),
SNMP_MIB_ITEM("InErrors", UDP_MIB_INERRORS),
SNMP_MIB_ITEM("OutDatagrams", UDP_MIB_OUTDATAGRAMS),
SNMP_MIB_ITEM("RcvbufErrors", UDP_MIB_RCVBUFERRORS),
SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS),
SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI),
SNMP_MIB_ITEM("MemErrors", UDP_MIB_MEMERRORS),
SNMP_MIB_SENTINEL
};
static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("SyncookiesSent", LINUX_MIB_SYNCOOKIESSENT),
SNMP_MIB_ITEM("SyncookiesRecv", LINUX_MIB_SYNCOOKIESRECV),
SNMP_MIB_ITEM("SyncookiesFailed", LINUX_MIB_SYNCOOKIESFAILED),
SNMP_MIB_ITEM("EmbryonicRsts", LINUX_MIB_EMBRYONICRSTS),
SNMP_MIB_ITEM("PruneCalled", LINUX_MIB_PRUNECALLED),
SNMP_MIB_ITEM("RcvPruned", LINUX_MIB_RCVPRUNED),
SNMP_MIB_ITEM("OfoPruned", LINUX_MIB_OFOPRUNED),
SNMP_MIB_ITEM("OutOfWindowIcmps", LINUX_MIB_OUTOFWINDOWICMPS),
SNMP_MIB_ITEM("LockDroppedIcmps", LINUX_MIB_LOCKDROPPEDICMPS),
SNMP_MIB_ITEM("ArpFilter", LINUX_MIB_ARPFILTER),
SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED),
SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED),
SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED),
SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED),
SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED),
SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS),
SNMP_MIB_ITEM("DelayedACKLocked", LINUX_MIB_DELAYEDACKLOCKED),
SNMP_MIB_ITEM("DelayedACKLost", LINUX_MIB_DELAYEDACKLOST),
SNMP_MIB_ITEM("ListenOverflows", LINUX_MIB_LISTENOVERFLOWS),
SNMP_MIB_ITEM("ListenDrops", LINUX_MIB_LISTENDROPS),
SNMP_MIB_ITEM("TCPHPHits", LINUX_MIB_TCPHPHITS),
SNMP_MIB_ITEM("TCPPureAcks", LINUX_MIB_TCPPUREACKS),
SNMP_MIB_ITEM("TCPHPAcks", LINUX_MIB_TCPHPACKS),
SNMP_MIB_ITEM("TCPRenoRecovery", LINUX_MIB_TCPRENORECOVERY),
SNMP_MIB_ITEM("TCPSackRecovery", LINUX_MIB_TCPSACKRECOVERY),
SNMP_MIB_ITEM("TCPSACKReneging", LINUX_MIB_TCPSACKRENEGING),
SNMP_MIB_ITEM("TCPSACKReorder", LINUX_MIB_TCPSACKREORDER),
SNMP_MIB_ITEM("TCPRenoReorder", LINUX_MIB_TCPRENOREORDER),
SNMP_MIB_ITEM("TCPTSReorder", LINUX_MIB_TCPTSREORDER),
SNMP_MIB_ITEM("TCPFullUndo", LINUX_MIB_TCPFULLUNDO),
SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
SNMP_MIB_ITEM("TCPLossFailures", LINUX_MIB_TCPLOSSFAILURES),
SNMP_MIB_ITEM("TCPFastRetrans", LINUX_MIB_TCPFASTRETRANS),
SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS),
SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS),
SNMP_MIB_ITEM("TCPLossProbes", LINUX_MIB_TCPLOSSPROBES),
SNMP_MIB_ITEM("TCPLossProbeRecovery", LINUX_MIB_TCPLOSSPROBERECOVERY),
SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL),
SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL),
SNMP_MIB_ITEM("TCPRcvCollapsed", LINUX_MIB_TCPRCVCOLLAPSED),
SNMP_MIB_ITEM("TCPBacklogCoalesce", LINUX_MIB_TCPBACKLOGCOALESCE),
SNMP_MIB_ITEM("TCPDSACKOldSent", LINUX_MIB_TCPDSACKOLDSENT),
SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
SNMP_MIB_ITEM("TCPAbortOnTimeout", LINUX_MIB_TCPABORTONTIMEOUT),
SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER),
SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED),
SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES),
SNMP_MIB_ITEM("TCPMemoryPressuresChrono", LINUX_MIB_TCPMEMORYPRESSURESCHRONO),
SNMP_MIB_ITEM("TCPSACKDiscard", LINUX_MIB_TCPSACKDISCARD),
SNMP_MIB_ITEM("TCPDSACKIgnoredOld", LINUX_MIB_TCPDSACKIGNOREDOLD),
SNMP_MIB_ITEM("TCPDSACKIgnoredNoUndo", LINUX_MIB_TCPDSACKIGNOREDNOUNDO),
SNMP_MIB_ITEM("TCPSpuriousRTOs", LINUX_MIB_TCPSPURIOUSRTOS),
SNMP_MIB_ITEM("TCPMD5NotFound", LINUX_MIB_TCPMD5NOTFOUND),
SNMP_MIB_ITEM("TCPMD5Unexpected", LINUX_MIB_TCPMD5UNEXPECTED),
SNMP_MIB_ITEM("TCPMD5Failure", LINUX_MIB_TCPMD5FAILURE),
SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
SNMP_MIB_ITEM("PFMemallocDrop", LINUX_MIB_PFMEMALLOCDROP),
SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE),
SNMP_MIB_ITEM("TCPOFOQueue", LINUX_MIB_TCPOFOQUEUE),
SNMP_MIB_ITEM("TCPOFODrop", LINUX_MIB_TCPOFODROP),
SNMP_MIB_ITEM("TCPOFOMerge", LINUX_MIB_TCPOFOMERGE),
SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
SNMP_MIB_ITEM("TCPFastOpenActiveFail", LINUX_MIB_TCPFASTOPENACTIVEFAIL),
SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE),
SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
SNMP_MIB_ITEM("TCPFastOpenBlackhole", LINUX_MIB_TCPFASTOPENBLACKHOLE),
SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING),
SNMP_MIB_ITEM("TCPFromZeroWindowAdv", LINUX_MIB_TCPFROMZEROWINDOWADV),
SNMP_MIB_ITEM("TCPToZeroWindowAdv", LINUX_MIB_TCPTOZEROWINDOWADV),
SNMP_MIB_ITEM("TCPWantZeroWindowAdv", LINUX_MIB_TCPWANTZEROWINDOWADV),
SNMP_MIB_ITEM("TCPSynRetrans", LINUX_MIB_TCPSYNRETRANS),
SNMP_MIB_ITEM("TCPOrigDataSent", LINUX_MIB_TCPORIGDATASENT),
SNMP_MIB_ITEM("TCPHystartTrainDetect", LINUX_MIB_TCPHYSTARTTRAINDETECT),
SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND),
SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT),
SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND),
SNMP_MIB_ITEM("TCPACKSkippedSynRecv", LINUX_MIB_TCPACKSKIPPEDSYNRECV),
SNMP_MIB_ITEM("TCPACKSkippedPAWS", LINUX_MIB_TCPACKSKIPPEDPAWS),
SNMP_MIB_ITEM("TCPACKSkippedSeq", LINUX_MIB_TCPACKSKIPPEDSEQ),
SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
SNMP_MIB_ITEM("TCPDelivered", LINUX_MIB_TCPDELIVERED),
SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE),
SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED),
SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP),
SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP),
SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
SNMP_MIB_ITEM("TCPFastOpenPassiveAltKey", LINUX_MIB_TCPFASTOPENPASSIVEALTKEY),
SNMP_MIB_ITEM("TcpTimeoutRehash", LINUX_MIB_TCPTIMEOUTREHASH),
SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH),
SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS),
SNMP_MIB_ITEM("TCPDSACKIgnoredDubious", LINUX_MIB_TCPDSACKIGNOREDDUBIOUS),
SNMP_MIB_ITEM("TCPMigrateReqSuccess", LINUX_MIB_TCPMIGRATEREQSUCCESS),
SNMP_MIB_ITEM("TCPMigrateReqFailure", LINUX_MIB_TCPMIGRATEREQFAILURE),
SNMP_MIB_ITEM("TCPPLBRehash", LINUX_MIB_TCPPLBREHASH),
SNMP_MIB_SENTINEL
};
static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals,
unsigned short *type, int count)
{
int j;
if (count) {
seq_puts(seq, "\nIcmpMsg:");
for (j = 0; j < count; ++j)
seq_printf(seq, " %sType%u",
type[j] & 0x100 ? "Out" : "In",
type[j] & 0xff);
seq_puts(seq, "\nIcmpMsg:");
for (j = 0; j < count; ++j)
seq_printf(seq, " %lu", vals[j]);
}
}
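/* Example (made-up counts): after receiving four echo requests (type 8)
 * and sending four echo replies (type 0), the IcmpMsg section of
 * /proc/net/snmp would read:
 *
 *	IcmpMsg: InType8 OutType0
 *	IcmpMsg: 4 4
 *
 * The MIB index encodes the direction in bit 0x100: indices 0-255 count
 * received types ("InTypeN"), indices 0x100-0x1ff count transmitted
 * types ("OutTypeN"); see also the "| 0x100" lookup in icmp_put() below.
 */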
static void icmpmsg_put(struct seq_file *seq)
{
#define PERLINE 16
int i, count;
unsigned short type[PERLINE];
unsigned long vals[PERLINE], val;
struct net *net = seq->private;
count = 0;
for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
if (val) {
type[count] = i;
vals[count++] = val;
}
if (count == PERLINE) {
icmpmsg_put_line(seq, vals, type, count);
count = 0;
}
}
icmpmsg_put_line(seq, vals, type, count);
#undef PERLINE
}
static void icmp_put(struct seq_file *seq)
{
int i;
struct net *net = seq->private;
atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors");
for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " In%s", icmpmibmap[i].name);
seq_puts(seq, " OutMsgs OutErrors OutRateLimitGlobal OutRateLimitHost");
for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " Out%s", icmpmibmap[i].name);
seq_printf(seq, "\nIcmp: %lu %lu %lu",
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + icmpmibmap[i].index));
seq_printf(seq, " %lu %lu %lu %lu",
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_RATELIMITGLOBAL),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_RATELIMITHOST));
for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
}
/*
* Called from the PROCfs module. This outputs /proc/net/snmp.
*/
static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
u64 buff64[IPSTATS_MIB_MAX];
int i;
memset(buff64, 0, IPSTATS_MIB_MAX * sizeof(u64));
seq_puts(seq, "Ip: Forwarding DefaultTTL");
for (i = 0; snmp4_ipstats_list[i].name; i++)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
seq_printf(seq, "\nIp: %d %d",
IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
net->mib.ip_statistics,
offsetof(struct ipstats_mib, syncp));
for (i = 0; snmp4_ipstats_list[i].name; i++)
seq_printf(seq, " %llu", buff64[i]);
return 0;
}
static int snmp_seq_show_tcp_udp(struct seq_file *seq, void *v)
{
unsigned long buff[TCPUDP_MIB_MAX];
struct net *net = seq->private;
int i;
memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
seq_puts(seq, "\nTcp:");
for (i = 0; snmp4_tcp_list[i].name; i++)
seq_printf(seq, " %s", snmp4_tcp_list[i].name);
seq_puts(seq, "\nTcp:");
snmp_get_cpu_field_batch(buff, snmp4_tcp_list,
net->mib.tcp_statistics);
for (i = 0; snmp4_tcp_list[i].name; i++) {
/* MaxConn field is signed, RFC 2012 */
if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
seq_printf(seq, " %ld", buff[i]);
else
seq_printf(seq, " %lu", buff[i]);
}
memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
snmp_get_cpu_field_batch(buff, snmp4_udp_list,
net->mib.udp_statistics);
seq_puts(seq, "\nUdp:");
for (i = 0; snmp4_udp_list[i].name; i++)
seq_printf(seq, " %s", snmp4_udp_list[i].name);
seq_puts(seq, "\nUdp:");
for (i = 0; snmp4_udp_list[i].name; i++)
seq_printf(seq, " %lu", buff[i]);
memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
/* the UDP and UDP-Lite MIBs are the same */
seq_puts(seq, "\nUdpLite:");
snmp_get_cpu_field_batch(buff, snmp4_udp_list,
net->mib.udplite_statistics);
for (i = 0; snmp4_udp_list[i].name; i++)
seq_printf(seq, " %s", snmp4_udp_list[i].name);
seq_puts(seq, "\nUdpLite:");
for (i = 0; snmp4_udp_list[i].name; i++)
seq_printf(seq, " %lu", buff[i]);
seq_putc(seq, '\n');
return 0;
}
static int snmp_seq_show(struct seq_file *seq, void *v)
{
snmp_seq_show_ipstats(seq, v);
icmp_put(seq); /* RFC 2011 compatibility */
icmpmsg_put(seq);
snmp_seq_show_tcp_udp(seq, v);
return 0;
}
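/* Illustrative /proc/net/snmp layout produced by snmp_seq_show(): every
 * protocol block is a pair of lines, a header naming the counters and a
 * value line in the same order (counts here are made up):
 *
 *	Ip: Forwarding DefaultTTL InReceives ...
 *	Ip: 2 64 123456 ...
 *	Icmp: InMsgs InErrors InCsumErrors InDestUnreachs ...
 *	Icmp: 10 0 0 2 ...
 *	IcmpMsg: InType8 OutType0
 *	IcmpMsg: 4 4
 *	Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens ...
 *	Tcp: 1 200 120000 -1 35 ...
 *	Udp: InDatagrams NoPorts InErrors ...
 *	Udp: 100 0 0 ...
 *	UdpLite: InDatagrams NoPorts InErrors ...
 *	UdpLite: 0 0 0 ...
 */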
/*
* Output /proc/net/netstat
*/
static int netstat_seq_show(struct seq_file *seq, void *v)
{
const int ip_cnt = ARRAY_SIZE(snmp4_ipextstats_list) - 1;
const int tcp_cnt = ARRAY_SIZE(snmp4_net_list) - 1;
struct net *net = seq->private;
unsigned long *buff;
int i;
seq_puts(seq, "TcpExt:");
for (i = 0; i < tcp_cnt; i++)
seq_printf(seq, " %s", snmp4_net_list[i].name);
seq_puts(seq, "\nTcpExt:");
buff = kzalloc(max(tcp_cnt * sizeof(long), ip_cnt * sizeof(u64)),
GFP_KERNEL);
if (buff) {
snmp_get_cpu_field_batch(buff, snmp4_net_list,
net->mib.net_statistics);
for (i = 0; i < tcp_cnt; i++)
seq_printf(seq, " %lu", buff[i]);
} else {
for (i = 0; i < tcp_cnt; i++)
seq_printf(seq, " %lu",
snmp_fold_field(net->mib.net_statistics,
snmp4_net_list[i].entry));
}
seq_puts(seq, "\nIpExt:");
for (i = 0; i < ip_cnt; i++)
seq_printf(seq, " %s", snmp4_ipextstats_list[i].name);
seq_puts(seq, "\nIpExt:");
if (buff) {
u64 *buff64 = (u64 *)buff;
memset(buff64, 0, ip_cnt * sizeof(u64));
snmp_get_cpu_field64_batch(buff64, snmp4_ipextstats_list,
net->mib.ip_statistics,
offsetof(struct ipstats_mib, syncp));
for (i = 0; i < ip_cnt; i++)
seq_printf(seq, " %llu", buff64[i]);
} else {
for (i = 0; i < ip_cnt; i++)
seq_printf(seq, " %llu",
snmp_fold_field64(net->mib.ip_statistics,
snmp4_ipextstats_list[i].entry,
offsetof(struct ipstats_mib, syncp)));
}
kfree(buff);
seq_putc(seq, '\n');
mptcp_seq_show(seq);
return 0;
}
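/* /proc/net/netstat follows the same header/value pairing, e.g. (counts
 * made up):
 *
 *	TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed ...
 *	TcpExt: 0 0 0 ...
 *	IpExt: InNoRoutes InTruncatedPkts InMcastPkts ...
 *	IpExt: 0 0 0 ...
 *
 * plus an MPTcpExt block appended by mptcp_seq_show() when MPTCP is
 * enabled.
 */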
static __net_init int ip_proc_init_net(struct net *net)
{
if (!proc_create_net_single("sockstat", 0444, net->proc_net,
sockstat_seq_show, NULL))
goto out_sockstat;
if (!proc_create_net_single("netstat", 0444, net->proc_net,
netstat_seq_show, NULL))
goto out_netstat;
if (!proc_create_net_single("snmp", 0444, net->proc_net, snmp_seq_show,
NULL))
goto out_snmp;
return 0;
out_snmp:
remove_proc_entry("netstat", net->proc_net);
out_netstat:
remove_proc_entry("sockstat", net->proc_net);
out_sockstat:
return -ENOMEM;
}
static __net_exit void ip_proc_exit_net(struct net *net)
{
remove_proc_entry("snmp", net->proc_net);
remove_proc_entry("netstat", net->proc_net);
remove_proc_entry("sockstat", net->proc_net);
}
static __net_initdata struct pernet_operations ip_proc_ops = {
.init = ip_proc_init_net,
.exit = ip_proc_exit_net,
};
int __init ip_misc_proc_init(void)
{
return register_pernet_subsys(&ip_proc_ops);
}
| linux-master | net/ipv4/proc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The IP fragmentation functionality.
*
* Authors: Fred N. van Kempen <[email protected]>
* Alan Cox <[email protected]>
*
* Fixes:
* Alan Cox : Split from ip.c , see ip_input.c for history.
* David S. Miller : Begin massive cleanup...
* Andi Kleen : Add sysctls.
* xxxx : Overlapfrag bug.
* Ultima : ip_expire() kernel panic.
* Bill Hawes : Frag accounting and evictor fixes.
* John McDonald : 0 length frag bug.
* Alexey Kuznetsov: SMP races, threading, cleanup.
* Patrick McHardy : LRU queue of frag heads for evictor.
*/
#define pr_fmt(fmt) "IPv4: " fmt
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>
/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
* code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
* as well. Or notify me, at least. --ANK
*/
static const char ip_frag_cache_name[] = "ip4-frags";
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
struct inet_frag_queue q;
u8 ecn; /* RFC3168 support */
u16 max_df_size; /* largest frag with DF set seen */
int iif;
unsigned int rid;
struct inet_peer *peer;
};
static u8 ip4_frag_ecn(u8 tos)
{
return 1 << (tos & INET_ECN_MASK);
}
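/* Example: a fragment with tos 0x02 (ECT(0)) contributes 1 << 2 = 0x04,
 * one with tos 0x03 (CE) contributes 0x08. ip_frag_queue() ORs these
 * per-fragment bits into qp->ecn, and ip_frag_ecn_table[] later folds
 * the accumulated set back into a single codepoint for the reassembled
 * datagram (0xff marks an invalid mix and aborts reassembly).
 */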
static struct inet_frags ip4_frags;
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
struct sk_buff *prev_tail, struct net_device *dev);
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
struct ipq *qp = container_of(q, struct ipq, q);
struct net *net = q->fqdir->net;
const struct frag_v4_compare_key *key = a;
q->key.v4 = *key;
qp->ecn = 0;
qp->peer = q->fqdir->max_dist ?
inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
NULL;
}
static void ip4_frag_free(struct inet_frag_queue *q)
{
struct ipq *qp;
qp = container_of(q, struct ipq, q);
if (qp->peer)
inet_putpeer(qp->peer);
}
/* Destruction primitives. */
static void ipq_put(struct ipq *ipq)
{
inet_frag_put(&ipq->q);
}
/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
*/
static void ipq_kill(struct ipq *ipq)
{
inet_frag_kill(&ipq->q);
}
static bool frag_expire_skip_icmp(u32 user)
{
return user == IP_DEFRAG_AF_PACKET ||
ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
__IP_DEFRAG_CONNTRACK_IN_END) ||
ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}
/*
* Oops, a fragment queue timed out. Kill it and send an ICMP reply.
*/
static void ip_expire(struct timer_list *t)
{
struct inet_frag_queue *frag = from_timer(frag, t, timer);
const struct iphdr *iph;
struct sk_buff *head = NULL;
struct net *net;
struct ipq *qp;
int err;
qp = container_of(frag, struct ipq, q);
net = qp->q.fqdir->net;
rcu_read_lock();
/* Paired with WRITE_ONCE() in fqdir_pre_exit(). */
if (READ_ONCE(qp->q.fqdir->dead))
goto out_rcu_unlock;
spin_lock(&qp->q.lock);
if (qp->q.flags & INET_FRAG_COMPLETE)
goto out;
qp->q.flags |= INET_FRAG_DROP;
ipq_kill(qp);
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
if (!(qp->q.flags & INET_FRAG_FIRST_IN))
goto out;
/* sk_buff::dev and sk_buff::rbnode are unionized. So we
* pull the head out of the tree in order to be able to
* deal with head->dev.
*/
head = inet_frag_pull_head(&qp->q);
if (!head)
goto out;
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out;
/* skb has no dst, perform route lookup again */
iph = ip_hdr(head);
err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
if (err)
goto out;
/* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
(skb_rtable(head)->rt_type != RTN_LOCAL))
goto out;
spin_unlock(&qp->q.lock);
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
goto out_rcu_unlock;
out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
ipq_put(qp);
}
/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
*/
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
u32 user, int vif)
{
struct frag_v4_compare_key key = {
.saddr = iph->saddr,
.daddr = iph->daddr,
.user = user,
.vif = vif,
.id = iph->id,
.protocol = iph->protocol,
};
struct inet_frag_queue *q;
q = inet_frag_find(net->ipv4.fqdir, &key);
if (!q)
return NULL;
return container_of(q, struct ipq, q);
}
/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
struct inet_peer *peer = qp->peer;
unsigned int max = qp->q.fqdir->max_dist;
unsigned int start, end;
int rc;
if (!peer || !max)
return 0;
start = qp->rid;
end = atomic_inc_return(&peer->rid);
qp->rid = end;
rc = qp->q.fragments_tail && (end - start) > max;
if (rc)
__IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS);
return rc;
}
static int ip_frag_reinit(struct ipq *qp)
{
unsigned int sum_truesize = 0;
if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) {
refcount_inc(&qp->q.refcnt);
return -ETIMEDOUT;
}
sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments,
SKB_DROP_REASON_FRAG_TOO_FAR);
sub_frag_mem_limit(qp->q.fqdir, sum_truesize);
qp->q.flags = 0;
qp->q.len = 0;
qp->q.meat = 0;
qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
qp->q.last_run_head = NULL;
qp->iif = 0;
qp->ecn = 0;
return 0;
}
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
struct net *net = qp->q.fqdir->net;
int ihl, end, flags, offset;
struct sk_buff *prev_tail;
struct net_device *dev;
unsigned int fragsize;
int err = -ENOENT;
SKB_DR(reason);
u8 ecn;
/* If reassembly is already done, @skb must be a duplicate frag. */
if (qp->q.flags & INET_FRAG_COMPLETE) {
SKB_DR_SET(reason, DUP_FRAG);
goto err;
}
if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
unlikely(ip_frag_too_far(qp)) &&
unlikely(err = ip_frag_reinit(qp))) {
ipq_kill(qp);
goto err;
}
ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
offset = ntohs(ip_hdr(skb)->frag_off);
flags = offset & ~IP_OFFSET;
offset &= IP_OFFSET;
offset <<= 3; /* offset is in 8-byte chunks */
ihl = ip_hdrlen(skb);
/* Determine the position of this fragment. */
end = offset + skb->len - skb_network_offset(skb) - ihl;
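	/* Example: the second fragment of a datagram split at a 1500-byte
	 * MTU arrives with frag_off = 0x20b9 on the wire: flags = 0x2000
	 * (IP_MF set), offset field = 0xb9 = 185, i.e. byte offset
	 * 185 * 8 = 1480. If it carries another 1480 payload bytes with a
	 * 20-byte header, end = 1480 + 1500 - 20 = 2960.
	 */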
err = -EINVAL;
/* Is this the final fragment? */
if ((flags & IP_MF) == 0) {
/* If we already have some bits beyond end
* or have different end, the segment is corrupted.
*/
if (end < qp->q.len ||
((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
goto discard_qp;
qp->q.flags |= INET_FRAG_LAST_IN;
qp->q.len = end;
} else {
if (end&7) {
end &= ~7;
if (skb->ip_summed != CHECKSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_NONE;
}
if (end > qp->q.len) {
/* Some bits beyond end -> corruption. */
if (qp->q.flags & INET_FRAG_LAST_IN)
goto discard_qp;
qp->q.len = end;
}
}
if (end == offset)
goto discard_qp;
err = -ENOMEM;
if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
goto discard_qp;
err = pskb_trim_rcsum(skb, end - offset);
if (err)
goto discard_qp;
	/* Note: skb->rbnode and skb->dev share the same location. */
dev = skb->dev;
	/* Make sure the compiler won't play silly aliasing games */
barrier();
prev_tail = qp->q.fragments_tail;
err = inet_frag_queue_insert(&qp->q, skb, offset, end);
if (err)
goto insert_error;
if (dev)
qp->iif = dev->ifindex;
qp->q.stamp = skb->tstamp;
qp->q.mono_delivery_time = skb->mono_delivery_time;
qp->q.meat += skb->len;
qp->ecn |= ecn;
add_frag_mem_limit(qp->q.fqdir, skb->truesize);
if (offset == 0)
qp->q.flags |= INET_FRAG_FIRST_IN;
fragsize = skb->len + ihl;
if (fragsize > qp->q.max_size)
qp->q.max_size = fragsize;
if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
fragsize > qp->max_df_size)
qp->max_df_size = fragsize;
if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
qp->q.meat == qp->q.len) {
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
if (err)
inet_frag_kill(&qp->q);
return err;
}
skb_dst_drop(skb);
return -EINPROGRESS;
insert_error:
if (err == IPFRAG_DUP) {
SKB_DR_SET(reason, DUP_FRAG);
err = -EINVAL;
goto err;
}
err = -EINVAL;
__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
inet_frag_kill(&qp->q);
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
kfree_skb_reason(skb, reason);
return err;
}
static bool ip_frag_coalesce_ok(const struct ipq *qp)
{
return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
}
/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
struct sk_buff *prev_tail, struct net_device *dev)
{
struct net *net = qp->q.fqdir->net;
struct iphdr *iph;
void *reasm_data;
int len, err;
u8 ecn;
ipq_kill(qp);
ecn = ip_frag_ecn_table[qp->ecn];
if (unlikely(ecn == 0xff)) {
err = -EINVAL;
goto out_fail;
}
/* Make the one we just received the head. */
reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
if (!reasm_data)
goto out_nomem;
len = ip_hdrlen(skb) + qp->q.len;
err = -E2BIG;
if (len > 65535)
goto out_oversize;
inet_frag_reasm_finish(&qp->q, skb, reasm_data,
ip_frag_coalesce_ok(qp));
skb->dev = dev;
IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
iph = ip_hdr(skb);
iph->tot_len = htons(len);
iph->tos |= ecn;
/* When we set IP_DF on a refragmented skb we must also force a
* call to ip_fragment to avoid forwarding a DF-skb of size s while
* original sender only sent fragments of size f (where f < s).
*
* We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
* frag seen to avoid sending tiny DF-fragments in case skb was built
* from one very small df-fragment and one large non-df frag.
*/
if (qp->max_df_size == qp->q.max_size) {
IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
iph->frag_off = htons(IP_DF);
} else {
iph->frag_off = 0;
}
ip_send_check(iph);
__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
qp->q.last_run_head = NULL;
return 0;
out_nomem:
net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
err = -ENOMEM;
goto out_fail;
out_oversize:
net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
return err;
}
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
int vif = l3mdev_master_ifindex_rcu(dev);
struct ipq *qp;
__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
skb_orphan(skb);
/* Lookup (or create) queue header */
qp = ip_find(net, ip_hdr(skb), user, vif);
if (qp) {
int ret;
spin_lock(&qp->q.lock);
ret = ip_frag_queue(qp, skb);
spin_unlock(&qp->q.lock);
ipq_put(qp);
return ret;
}
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
struct iphdr iph;
int netoff;
u32 len;
if (skb->protocol != htons(ETH_P_IP))
return skb;
netoff = skb_network_offset(skb);
if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
return skb;
if (iph.ihl < 5 || iph.version != 4)
return skb;
len = ntohs(iph.tot_len);
if (skb->len < netoff + len || len < (iph.ihl * 4))
return skb;
if (ip_is_fragment(&iph)) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
kfree_skb(skb);
return NULL;
}
if (pskb_trim_rcsum(skb, netoff + len)) {
kfree_skb(skb);
return NULL;
}
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (ip_defrag(net, skb, user))
return NULL;
skb_clear_hash(skb);
}
}
return skb;
}
EXPORT_SYMBOL(ip_check_defrag);
#ifdef CONFIG_SYSCTL
static int dist_min;
static struct ctl_table ip4_frags_ns_ctl_table[] = {
{
.procname = "ipfrag_high_thresh",
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "ipfrag_low_thresh",
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "ipfrag_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ipfrag_max_dist",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &dist_min,
},
{ }
};
/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
{
.procname = "ipfrag_secret_interval",
.data = &ip4_frags_secret_interval_unused,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ }
};
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
table = ip4_frags_ns_ctl_table;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
if (!table)
goto err_alloc;
}
table[0].data = &net->ipv4.fqdir->high_thresh;
table[0].extra1 = &net->ipv4.fqdir->low_thresh;
table[1].data = &net->ipv4.fqdir->low_thresh;
table[1].extra2 = &net->ipv4.fqdir->high_thresh;
table[2].data = &net->ipv4.fqdir->timeout;
table[3].data = &net->ipv4.fqdir->max_dist;
hdr = register_net_sysctl_sz(net, "net/ipv4", table,
ARRAY_SIZE(ip4_frags_ns_ctl_table));
if (!hdr)
goto err_reg;
net->ipv4.frags_hdr = hdr;
return 0;
err_reg:
if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
}
static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
struct ctl_table *table;
table = net->ipv4.frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.frags_hdr);
kfree(table);
}
static void __init ip4_frags_ctl_register(void)
{
register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
return 0;
}
static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}
static void __init ip4_frags_ctl_register(void)
{
}
#endif
static int __net_init ipv4_frags_init_net(struct net *net)
{
int res;
res = fqdir_init(&net->ipv4.fqdir, &ip4_frags, net);
if (res < 0)
return res;
/* Fragment cache limits.
*
* The fragment memory accounting code, (tries to) account for
* the real memory usage, by measuring both the size of frag
* queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
* and the SKB's truesize.
*
	 * A 64K datagram reassembled from ~44 fragments of 1500 bytes
	 * consumes about 129736 bytes ((44 * 2944) + 200), since a
	 * 1500-byte fragment has a truesize of roughly 2944 bytes and
	 * sizeof(struct ipq) is about 200 bytes.
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, freeing roughly 1MB, i.e. room for
	 * about eight such ~128K reassemblies.
*/
net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024;
net->ipv4.fqdir->low_thresh = 3 * 1024 * 1024;
/*
	 * Important NOTE! The fragment queue must be destroyed before the
	 * MSL expires. RFC 791 is wrong in proposing to prolong the timer
	 * on each fragment arrival by the TTL.
*/
net->ipv4.fqdir->timeout = IP_FRAG_TIME;
net->ipv4.fqdir->max_dist = 64;
res = ip4_frags_ns_ctl_register(net);
if (res < 0)
fqdir_exit(net->ipv4.fqdir);
return res;
}
static void __net_exit ipv4_frags_pre_exit_net(struct net *net)
{
fqdir_pre_exit(net->ipv4.fqdir);
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
{
ip4_frags_ns_ctl_unregister(net);
fqdir_exit(net->ipv4.fqdir);
}
static struct pernet_operations ip4_frags_ops = {
.init = ipv4_frags_init_net,
.pre_exit = ipv4_frags_pre_exit_net,
.exit = ipv4_frags_exit_net,
};
static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
return jhash2(data,
sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}
static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
const struct inet_frag_queue *fq = data;
return jhash2((const u32 *)&fq->key.v4,
sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}
static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
const struct frag_v4_compare_key *key = arg->key;
const struct inet_frag_queue *fq = ptr;
return !!memcmp(&fq->key, key, sizeof(*key));
}
static const struct rhashtable_params ip4_rhash_params = {
.head_offset = offsetof(struct inet_frag_queue, node),
.key_offset = offsetof(struct inet_frag_queue, key),
.key_len = sizeof(struct frag_v4_compare_key),
.hashfn = ip4_key_hashfn,
.obj_hashfn = ip4_obj_hashfn,
.obj_cmpfn = ip4_obj_cmpfn,
.automatic_shrinking = true,
};
void __init ipfrag_init(void)
{
ip4_frags.constructor = ip4_frag_init;
ip4_frags.destructor = ip4_frag_free;
ip4_frags.qsize = sizeof(struct ipq);
ip4_frags.frag_expire = ip_expire;
ip4_frags.frags_cache_name = ip_frag_cache_name;
ip4_frags.rhash_params = ip4_rhash_params;
if (inet_frags_init(&ip4_frags))
panic("IP: failed to allocate ip4_frags cache\n");
ip4_frags_ctl_register();
register_pernet_subsys(&ip4_frags_ops);
}
| linux-master | net/ipv4/ip_fragment.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GRE over IPv4 demultiplexer driver
*
* Authors: Dmitry Kozlov ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/if.h>
#include <linux/icmp.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/if_tunnel.h>
#include <linux/spinlock.h>
#include <net/protocol.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/xfrm.h>
static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
{
if (version >= GREPROTO_MAX)
return -EINVAL;
return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ?
0 : -EBUSY;
}
EXPORT_SYMBOL_GPL(gre_add_protocol);
int gre_del_protocol(const struct gre_protocol *proto, u8 version)
{
int ret;
if (version >= GREPROTO_MAX)
return -EINVAL;
ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?
0 : -EBUSY;
if (ret)
return ret;
synchronize_rcu();
return 0;
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
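/* A hypothetical user of the two helpers above (names below are
 * illustrative, not defined in this file) registers a per-version
 * handler roughly like this; pptp does the same for GREPROTO_PPTP:
 *
 *	static const struct gre_protocol my_gre_protocol = {
 *		.handler     = my_gre_rcv,
 *		.err_handler = my_gre_err,
 *	};
 *
 *	if (gre_add_protocol(&my_gre_protocol, GREPROTO_CISCO) < 0)
 *		pr_err("GRE version 0 demux already claimed\n");
 *
 * and calls gre_del_protocol(&my_gre_protocol, GREPROTO_CISCO) on
 * module unload.
 */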
/* Fills in tpi and returns header length to be pulled.
* Note that caller must use pskb_may_pull() before pulling GRE header.
*/
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto, int nhs)
{
const struct gre_base_hdr *greh;
__be32 *options;
int hdr_len;
if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
return -EINVAL;
greh = (struct gre_base_hdr *)(skb->data + nhs);
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags);
hdr_len = gre_calc_hlen(tpi->flags);
if (!pskb_may_pull(skb, nhs + hdr_len))
return -EINVAL;
greh = (struct gre_base_hdr *)(skb->data + nhs);
tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1);
if (greh->flags & GRE_CSUM) {
if (!skb_checksum_simple_validate(skb)) {
skb_checksum_try_convert(skb, IPPROTO_GRE,
null_compute_pseudo);
} else if (csum_err) {
*csum_err = true;
return -EINVAL;
}
options++;
}
if (greh->flags & GRE_KEY) {
tpi->key = *options;
options++;
} else {
tpi->key = 0;
}
if (unlikely(greh->flags & GRE_SEQ)) {
tpi->seq = *options;
options++;
} else {
tpi->seq = 0;
}
/* WCCP version 1 and 2 protocol decoding.
* - Change protocol to IPv4/IPv6
	 * - When dealing with WCCPv2, skip the extra 4 bytes of redirect header
*/
if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
u8 _val, *val;
val = skb_header_pointer(skb, nhs + hdr_len,
sizeof(_val), &_val);
if (!val)
return -EINVAL;
tpi->proto = proto;
if ((*val & 0xF0) != 0x40)
hdr_len += 4;
}
tpi->hdr_len = hdr_len;
	/* ERSPAN versions 1 and 2 set the GRE key field to 0 and carry
	 * the configured key in the inner ERSPAN header (session ID)
	 * field instead
*/
if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) ||
greh->protocol == htons(ETH_P_ERSPAN2)) {
struct erspan_base_hdr *ershdr;
if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
return -EINVAL;
ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len);
tpi->key = cpu_to_be32(get_session_id(ershdr));
}
return hdr_len;
}
EXPORT_SYMBOL(gre_parse_header);
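/* For reference, the base header parsed above is 4 bytes (flags +
 * protocol), followed by optional 4-byte fields selected by the flag
 * bits in this order: checksum + reserved (GRE_CSUM), key (GRE_KEY),
 * sequence number (GRE_SEQ). A packet with GRE_KEY and GRE_SEQ set, for
 * example, yields hdr_len = 4 + 4 + 4 = 12 from gre_calc_hlen().
 */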
static int gre_rcv(struct sk_buff *skb)
{
const struct gre_protocol *proto;
u8 ver;
int ret;
if (!pskb_may_pull(skb, 12))
goto drop;
ver = skb->data[1]&0x7f;
if (ver >= GREPROTO_MAX)
goto drop;
rcu_read_lock();
proto = rcu_dereference(gre_proto[ver]);
if (!proto || !proto->handler)
goto drop_unlock;
ret = proto->handler(skb);
rcu_read_unlock();
return ret;
drop_unlock:
rcu_read_unlock();
drop:
kfree_skb(skb);
return NET_RX_DROP;
}
static int gre_err(struct sk_buff *skb, u32 info)
{
const struct gre_protocol *proto;
const struct iphdr *iph = (const struct iphdr *)skb->data;
u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;
int err = 0;
if (ver >= GREPROTO_MAX)
return -EINVAL;
rcu_read_lock();
proto = rcu_dereference(gre_proto[ver]);
if (proto && proto->err_handler)
proto->err_handler(skb, info);
else
err = -EPROTONOSUPPORT;
rcu_read_unlock();
return err;
}
static const struct net_protocol net_gre_protocol = {
.handler = gre_rcv,
.err_handler = gre_err,
};
static int __init gre_init(void)
{
	pr_info("GRE over IPv4 demultiplexer driver\n");
if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
pr_err("can't add protocol\n");
return -EAGAIN;
}
return 0;
}
static void __exit gre_exit(void)
{
inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}
module_init(gre_init);
module_exit(gre_exit);
MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
MODULE_AUTHOR("D. Kozlov ([email protected])");
MODULE_LICENSE("GPL");
| linux-master | net/ipv4/gre_demux.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* inet_diag.c Module for monitoring INET transport protocols sockets.
*
* Authors: Alexey Kuznetsov, <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/bpf_sk_storage.h>
#include <net/netlink.h>
#include <linux/inet.h>
#include <linux/stddef.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
static const struct inet_diag_handler **inet_diag_table;
struct inet_diag_entry {
const __be32 *saddr;
const __be32 *daddr;
u16 sport;
u16 dport;
u16 family;
u16 userlocks;
u32 ifindex;
u32 mark;
#ifdef CONFIG_SOCK_CGROUP_DATA
u64 cgroup_id;
#endif
};
static DEFINE_MUTEX(inet_diag_table_mutex);
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
if (proto < 0 || proto >= IPPROTO_MAX) {
mutex_lock(&inet_diag_table_mutex);
return ERR_PTR(-ENOENT);
}
if (!inet_diag_table[proto])
sock_load_diag_module(AF_INET, proto);
mutex_lock(&inet_diag_table_mutex);
if (!inet_diag_table[proto])
return ERR_PTR(-ENOENT);
return inet_diag_table[proto];
}
static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
mutex_unlock(&inet_diag_table_mutex);
}
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
r->idiag_family = sk->sk_family;
r->id.idiag_sport = htons(sk->sk_num);
r->id.idiag_dport = sk->sk_dport;
r->id.idiag_if = sk->sk_bound_dev_if;
sock_diag_save_cookie(sk, r->id.idiag_cookie);
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6) {
*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
} else
#endif
{
memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
r->id.idiag_src[0] = sk->sk_rcv_saddr;
r->id.idiag_dst[0] = sk->sk_daddr;
}
}
EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);
static size_t inet_sk_attr_size(struct sock *sk,
const struct inet_diag_req_v2 *req,
bool net_admin)
{
const struct inet_diag_handler *handler;
size_t aux = 0;
handler = inet_diag_table[req->sdiag_protocol];
if (handler && handler->idiag_get_aux_size)
aux = handler->idiag_get_aux_size(sk, net_admin);
return nla_total_size(sizeof(struct tcp_info))
+ nla_total_size(sizeof(struct inet_diag_msg))
+ inet_diag_msg_attrs_size()
+ nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+ nla_total_size(TCP_CA_NAME_MAX)
+ nla_total_size(sizeof(struct tcpvegas_info))
+ aux
+ 64;
}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
struct user_namespace *user_ns,
bool net_admin)
{
const struct inet_sock *inet = inet_sk(sk);
struct inet_diag_sockopt inet_sockopt;
if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
goto errout;
/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
* hence this needs to be included regardless of socket family.
*/
if (ext & (1 << (INET_DIAG_TOS - 1)))
if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
goto errout;
#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
if (ext & (1 << (INET_DIAG_TCLASS - 1)))
if (nla_put_u8(skb, INET_DIAG_TCLASS,
inet6_sk(sk)->tclass) < 0)
goto errout;
if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
goto errout;
}
#endif
if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, READ_ONCE(sk->sk_mark)))
goto errout;
if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
ext & (1 << (INET_DIAG_TCLASS - 1))) {
u32 classid = 0;
#ifdef CONFIG_SOCK_CGROUP_DATA
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
#endif
		/* Fall back to socket priority if the class id isn't set.
		 * Classful qdiscs use it as a direct reference to a class.
		 * For cgroup2 the classid is always zero.
*/
if (!classid)
classid = sk->sk_priority;
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
goto errout;
}
#ifdef CONFIG_SOCK_CGROUP_DATA
if (nla_put_u64_64bit(skb, INET_DIAG_CGROUP_ID,
cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)),
INET_DIAG_PAD))
goto errout;
#endif
r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = sock_i_ino(sk);
memset(&inet_sockopt, 0, sizeof(inet_sockopt));
inet_sockopt.recverr = inet_test_bit(RECVERR, sk);
inet_sockopt.is_icsk = inet_test_bit(IS_ICSK, sk);
inet_sockopt.freebind = inet_test_bit(FREEBIND, sk);
inet_sockopt.hdrincl = inet_test_bit(HDRINCL, sk);
inet_sockopt.mc_loop = inet_test_bit(MC_LOOP, sk);
inet_sockopt.transparent = inet_test_bit(TRANSPARENT, sk);
inet_sockopt.mc_all = inet_test_bit(MC_ALL, sk);
inet_sockopt.nodefrag = inet_test_bit(NODEFRAG, sk);
inet_sockopt.bind_address_no_port = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
inet_sockopt.recverr_rfc4884 = inet_test_bit(RECVERR_RFC4884, sk);
inet_sockopt.defer_connect = inet_test_bit(DEFER_CONNECT, sk);
if (nla_put(skb, INET_DIAG_SOCKOPT, sizeof(inet_sockopt),
&inet_sockopt))
goto errout;
return 0;
errout:
return 1;
}
EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
static int inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr **req_nlas)
{
struct nlattr *nla;
int remaining;
nlmsg_for_each_attr(nla, nlh, hdrlen, remaining) {
int type = nla_type(nla);
if (type == INET_DIAG_REQ_PROTOCOL && nla_len(nla) != sizeof(u32))
return -EINVAL;
if (type < __INET_DIAG_REQ_MAX)
req_nlas[type] = nla;
}
return 0;
}
static int inet_diag_get_protocol(const struct inet_diag_req_v2 *req,
const struct inet_diag_dump_data *data)
{
if (data->req_nlas[INET_DIAG_REQ_PROTOCOL])
return nla_get_u32(data->req_nlas[INET_DIAG_REQ_PROTOCOL]);
return req->sdiag_protocol;
}
#define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *req,
u16 nlmsg_flags, bool net_admin)
{
const struct tcp_congestion_ops *ca_ops;
const struct inet_diag_handler *handler;
struct inet_diag_dump_data *cb_data;
int ext = req->idiag_ext;
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
struct nlattr *attr;
void *info = NULL;
cb_data = cb->data;
handler = inet_diag_table[inet_diag_get_protocol(req, cb_data)];
BUG_ON(!handler);
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
r = nlmsg_data(nlh);
BUG_ON(!sk_fullsock(sk));
inet_diag_msg_common_fill(r, sk);
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
r->idiag_retrans = 0;
r->idiag_expires = 0;
if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
sk_user_ns(NETLINK_CB(cb->skb).sk),
net_admin))
goto errout;
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
struct inet_diag_meminfo minfo = {
.idiag_rmem = sk_rmem_alloc_get(sk),
.idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
.idiag_fmem = sk_forward_alloc_get(sk),
.idiag_tmem = sk_wmem_alloc_get(sk),
};
if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
goto errout;
}
if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
goto errout;
/*
* RAW sockets might have user-defined protocols assigned,
* so report the one supplied on socket creation.
*/
if (sk->sk_type == SOCK_RAW) {
if (nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))
goto errout;
}
if (!icsk) {
handler->idiag_get_info(sk, r, NULL);
goto out;
}
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
r->idiag_timer = 1;
r->idiag_retrans = icsk->icsk_retransmits;
r->idiag_expires =
jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
r->idiag_timer = 4;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
} else if (timer_pending(&sk->sk_timer)) {
r->idiag_timer = 2;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
}
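	/* The idiag_timer values set above follow the convention that
	 * userspace tools such as ss(8) rely on: 1 = retransmit/loss-probe
	 * timer, 2 = keepalive (sk_timer), 4 = zero window probe timer;
	 * 3 is used for the TIME_WAIT timer in inet_twsk_diag_fill() below.
	 * idiag_expires is reported in milliseconds.
	 */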
if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
handler->idiag_info_size,
INET_DIAG_PAD);
if (!attr)
goto errout;
info = nla_data(attr);
}
if (ext & (1 << (INET_DIAG_CONG - 1))) {
int err = 0;
rcu_read_lock();
ca_ops = READ_ONCE(icsk->icsk_ca_ops);
if (ca_ops)
err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
rcu_read_unlock();
if (err < 0)
goto errout;
}
handler->idiag_get_info(sk, r, info);
if (ext & (1 << (INET_DIAG_INFO - 1)) && handler->idiag_get_aux)
if (handler->idiag_get_aux(sk, net_admin, skb) < 0)
goto errout;
if (sk->sk_state < TCP_TIME_WAIT) {
union tcp_cc_info info;
size_t sz = 0;
int attr;
rcu_read_lock();
ca_ops = READ_ONCE(icsk->icsk_ca_ops);
if (ca_ops && ca_ops->get_info)
sz = ca_ops->get_info(sk, ext, &attr, &info);
rcu_read_unlock();
if (sz && nla_put(skb, attr, sz, &info) < 0)
goto errout;
}
/* Keep it at the end for potential retry with a larger skb,
* or else do best-effort fitting, which is only done for the
* first_nlmsg.
*/
if (cb_data->bpf_stg_diag) {
bool first_nlmsg = ((unsigned char *)nlh == skb->data);
unsigned int prev_min_dump_alloc;
unsigned int total_nla_size = 0;
unsigned int msg_len;
int err;
msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb,
INET_DIAG_SK_BPF_STORAGES,
&total_nla_size);
if (!err)
goto out;
total_nla_size += msg_len;
prev_min_dump_alloc = cb->min_dump_alloc;
if (total_nla_size > prev_min_dump_alloc)
cb->min_dump_alloc = min_t(u32, total_nla_size,
MAX_DUMP_ALLOC_SIZE);
if (!first_nlmsg)
goto errout;
if (cb->min_dump_alloc > prev_min_dump_alloc)
/* Retry with pskb_expand_head() with
* __GFP_DIRECT_RECLAIM
*/
goto errout;
WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc);
/* Send what we have for this sk
* and move on to the next sk in the following
* dump()
*/
}
out:
nlmsg_end(skb, nlh);
return 0;
errout:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
static int inet_twsk_diag_fill(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb,
u16 nlmsg_flags, bool net_admin)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type,
sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
r = nlmsg_data(nlh);
BUG_ON(tw->tw_state != TCP_TIME_WAIT);
inet_diag_msg_common_fill(r, sk);
r->idiag_retrans = 0;
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
tmo = tw->tw_timer.expires - jiffies;
r->idiag_expires = jiffies_delta_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
r->idiag_inode = 0;
if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
tw->tw_mark)) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
nlmsg_end(skb, nlh);
return 0;
}
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb,
u16 nlmsg_flags, bool net_admin)
{
struct request_sock *reqsk = inet_reqsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
r = nlmsg_data(nlh);
inet_diag_msg_common_fill(r, sk);
r->idiag_state = TCP_SYN_RECV;
r->idiag_timer = 1;
r->idiag_retrans = reqsk->num_retrans;
BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
offsetof(struct sock, sk_cookie));
tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
r->idiag_expires = jiffies_delta_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
r->idiag_inode = 0;
if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
inet_rsk(reqsk)->ir_mark)) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
nlmsg_end(skb, nlh);
return 0;
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
u16 nlmsg_flags, bool net_admin)
{
if (sk->sk_state == TCP_TIME_WAIT)
return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
if (sk->sk_state == TCP_NEW_SYN_RECV)
return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags,
net_admin);
}
struct sock *inet_diag_find_one_icsk(struct net *net,
struct inet_hashinfo *hashinfo,
const struct inet_diag_req_v2 *req)
{
struct sock *sk;
rcu_read_lock();
if (req->sdiag_family == AF_INET)
sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
req->id.idiag_dport, req->id.idiag_src[0],
req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
else if (req->sdiag_family == AF_INET6) {
if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3],
req->id.idiag_dport, req->id.idiag_src[3],
req->id.idiag_sport, req->id.idiag_if);
else
sk = inet6_lookup(net, hashinfo, NULL, 0,
(struct in6_addr *)req->id.idiag_dst,
req->id.idiag_dport,
(struct in6_addr *)req->id.idiag_src,
req->id.idiag_sport,
req->id.idiag_if);
}
#endif
else {
rcu_read_unlock();
return ERR_PTR(-EINVAL);
}
rcu_read_unlock();
if (!sk)
return ERR_PTR(-ENOENT);
if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
sock_gen_put(sk);
return ERR_PTR(-ENOENT);
}
return sk;
}
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
struct sk_buff *in_skb = cb->skb;
bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN);
struct net *net = sock_net(in_skb->sk);
struct sk_buff *rep;
struct sock *sk;
int err;
sk = inet_diag_find_one_icsk(net, hashinfo, req);
if (IS_ERR(sk))
return PTR_ERR(sk);
rep = nlmsg_new(inet_sk_attr_size(sk, req, net_admin), GFP_KERNEL);
if (!rep) {
err = -ENOMEM;
goto out;
}
err = sk_diag_fill(sk, rep, cb, req, 0, net_admin);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
nlmsg_free(rep);
goto out;
}
err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
out:
if (sk)
sock_gen_put(sk);
return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
const struct nlmsghdr *nlh,
int hdrlen,
const struct inet_diag_req_v2 *req)
{
const struct inet_diag_handler *handler;
struct inet_diag_dump_data dump_data;
int err, protocol;
memset(&dump_data, 0, sizeof(dump_data));
err = inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas);
if (err)
return err;
protocol = inet_diag_get_protocol(req, &dump_data);
handler = inet_diag_lock_handler(protocol);
if (IS_ERR(handler)) {
err = PTR_ERR(handler);
} else if (cmd == SOCK_DIAG_BY_FAMILY) {
struct netlink_callback cb = {
.nlh = nlh,
.skb = in_skb,
.data = &dump_data,
};
err = handler->dump_one(&cb, req);
} else if (cmd == SOCK_DESTROY && handler->destroy) {
err = handler->destroy(in_skb, req);
} else {
err = -EOPNOTSUPP;
}
inet_diag_unlock_handler(handler);
return err;
}
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
int words = bits >> 5;
bits &= 0x1f;
if (words) {
if (memcmp(a1, a2, words << 2))
return 0;
}
if (bits) {
__be32 w1, w2;
__be32 mask;
w1 = a1[words];
w2 = a2[words];
mask = htonl((0xffffffff) << (32 - bits));
if ((w1 ^ w2) & mask)
return 0;
}
return 1;
}
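/* Example: with bits = 20 no whole words are compared (20 >> 5 == 0) and
 * the first word is masked with htonl(0xfffff000), i.e. only the top 20
 * bits of the big-endian address are significant; a 10.1.16.0/20
 * condition therefore matches any address in 10.1.16.0 - 10.1.31.255.
 */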
static int inet_diag_bc_run(const struct nlattr *_bc,
const struct inet_diag_entry *entry)
{
const void *bc = nla_data(_bc);
int len = nla_len(_bc);
while (len > 0) {
int yes = 1;
const struct inet_diag_bc_op *op = bc;
switch (op->code) {
case INET_DIAG_BC_NOP:
break;
case INET_DIAG_BC_JMP:
yes = 0;
break;
case INET_DIAG_BC_S_EQ:
yes = entry->sport == op[1].no;
break;
case INET_DIAG_BC_S_GE:
yes = entry->sport >= op[1].no;
break;
case INET_DIAG_BC_S_LE:
yes = entry->sport <= op[1].no;
break;
case INET_DIAG_BC_D_EQ:
yes = entry->dport == op[1].no;
break;
case INET_DIAG_BC_D_GE:
yes = entry->dport >= op[1].no;
break;
case INET_DIAG_BC_D_LE:
yes = entry->dport <= op[1].no;
break;
case INET_DIAG_BC_AUTO:
yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
break;
case INET_DIAG_BC_S_COND:
case INET_DIAG_BC_D_COND: {
const struct inet_diag_hostcond *cond;
const __be32 *addr;
cond = (const struct inet_diag_hostcond *)(op + 1);
if (cond->port != -1 &&
cond->port != (op->code == INET_DIAG_BC_S_COND ?
entry->sport : entry->dport)) {
yes = 0;
break;
}
if (op->code == INET_DIAG_BC_S_COND)
addr = entry->saddr;
else
addr = entry->daddr;
if (cond->family != AF_UNSPEC &&
cond->family != entry->family) {
if (entry->family == AF_INET6 &&
cond->family == AF_INET) {
if (addr[0] == 0 && addr[1] == 0 &&
addr[2] == htonl(0xffff) &&
bitstring_match(addr + 3,
cond->addr,
cond->prefix_len))
break;
}
yes = 0;
break;
}
if (cond->prefix_len == 0)
break;
if (bitstring_match(addr, cond->addr,
cond->prefix_len))
break;
yes = 0;
break;
}
case INET_DIAG_BC_DEV_COND: {
u32 ifindex;
ifindex = *((const u32 *)(op + 1));
if (ifindex != entry->ifindex)
yes = 0;
break;
}
case INET_DIAG_BC_MARK_COND: {
struct inet_diag_markcond *cond;
cond = (struct inet_diag_markcond *)(op + 1);
if ((entry->mark & cond->mask) != cond->mark)
yes = 0;
break;
}
#ifdef CONFIG_SOCK_CGROUP_DATA
case INET_DIAG_BC_CGROUP_COND: {
u64 cgroup_id;
cgroup_id = get_unaligned((const u64 *)(op + 1));
if (cgroup_id != entry->cgroup_id)
yes = 0;
break;
}
#endif
}
if (yes) {
len -= op->yes;
bc += op->yes;
} else {
len -= op->no;
bc += op->no;
}
}
return len == 0;
}
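/* The filter walked above is a flat bytecode program made of
 * inet_diag_bc_op {code, yes, no} triples: a matching op advances the
 * cursor by op->yes bytes, a non-matching one by op->no. Operands sit
 * right after the op - port comparisons keep the port in the 'no' field
 * of the following op (op[1].no above), host conditions append a
 * struct inet_diag_hostcond. A socket is accepted only if the walk
 * consumes the bytecode exactly (len == 0); jumping past the end
 * rejects it.
 */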
/* This helper is available for all sockets (ESTABLISHED, TIME_WAIT,
 * NEW_SYN_RECV).
 */
static void entry_fill_addrs(struct inet_diag_entry *entry,
const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6) {
entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
entry->daddr = sk->sk_v6_daddr.s6_addr32;
} else
#endif
{
entry->saddr = &sk->sk_rcv_saddr;
entry->daddr = &sk->sk_daddr;
}
}
int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_diag_entry entry;
if (!bc)
return 1;
entry.family = sk->sk_family;
entry_fill_addrs(&entry, sk);
entry.sport = inet->inet_num;
entry.dport = ntohs(inet->inet_dport);
entry.ifindex = sk->sk_bound_dev_if;
entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
if (sk_fullsock(sk))
entry.mark = READ_ONCE(sk->sk_mark);
else if (sk->sk_state == TCP_NEW_SYN_RECV)
entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
else if (sk->sk_state == TCP_TIME_WAIT)
entry.mark = inet_twsk(sk)->tw_mark;
else
entry.mark = 0;
#ifdef CONFIG_SOCK_CGROUP_DATA
entry.cgroup_id = sk_fullsock(sk) ?
cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)) : 0;
#endif
return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
static int valid_cc(const void *bc, int len, int cc)
{
while (len >= 0) {
const struct inet_diag_bc_op *op = bc;
if (cc > len)
return 0;
if (cc == len)
return 1;
if (op->yes < 4 || op->yes & 3)
return 0;
len -= op->yes;
bc += op->yes;
}
return 0;
}
/* data is u32 ifindex */
static bool valid_devcond(const struct inet_diag_bc_op *op, int len,
int *min_len)
{
/* Check ifindex space. */
*min_len += sizeof(u32);
if (len < *min_len)
return false;
return true;
}
/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
int *min_len)
{
struct inet_diag_hostcond *cond;
int addr_len;
/* Check hostcond space. */
*min_len += sizeof(struct inet_diag_hostcond);
if (len < *min_len)
return false;
cond = (struct inet_diag_hostcond *)(op + 1);
/* Check address family and address length. */
switch (cond->family) {
case AF_UNSPEC:
addr_len = 0;
break;
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
default:
return false;
}
*min_len += addr_len;
if (len < *min_len)
return false;
/* Check prefix length (in bits) vs address length (in bytes). */
if (cond->prefix_len > 8 * addr_len)
return false;
return true;
}
/* Validate a port comparison operator. */
static bool valid_port_comparison(const struct inet_diag_bc_op *op,
int len, int *min_len)
{
/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
*min_len += sizeof(struct inet_diag_bc_op);
if (len < *min_len)
return false;
return true;
}
static bool valid_markcond(const struct inet_diag_bc_op *op, int len,
int *min_len)
{
*min_len += sizeof(struct inet_diag_markcond);
return len >= *min_len;
}
#ifdef CONFIG_SOCK_CGROUP_DATA
static bool valid_cgroupcond(const struct inet_diag_bc_op *op, int len,
int *min_len)
{
*min_len += sizeof(u64);
return len >= *min_len;
}
#endif
static int inet_diag_bc_audit(const struct nlattr *attr,
const struct sk_buff *skb)
{
bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
const void *bytecode, *bc;
int bytecode_len, len;
if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op))
return -EINVAL;
bytecode = bc = nla_data(attr);
len = bytecode_len = nla_len(attr);
while (len > 0) {
int min_len = sizeof(struct inet_diag_bc_op);
const struct inet_diag_bc_op *op = bc;
switch (op->code) {
case INET_DIAG_BC_S_COND:
case INET_DIAG_BC_D_COND:
if (!valid_hostcond(bc, len, &min_len))
return -EINVAL;
break;
case INET_DIAG_BC_DEV_COND:
if (!valid_devcond(bc, len, &min_len))
return -EINVAL;
break;
case INET_DIAG_BC_S_EQ:
case INET_DIAG_BC_S_GE:
case INET_DIAG_BC_S_LE:
case INET_DIAG_BC_D_EQ:
case INET_DIAG_BC_D_GE:
case INET_DIAG_BC_D_LE:
if (!valid_port_comparison(bc, len, &min_len))
return -EINVAL;
break;
case INET_DIAG_BC_MARK_COND:
if (!net_admin)
return -EPERM;
if (!valid_markcond(bc, len, &min_len))
return -EINVAL;
break;
#ifdef CONFIG_SOCK_CGROUP_DATA
case INET_DIAG_BC_CGROUP_COND:
if (!valid_cgroupcond(bc, len, &min_len))
return -EINVAL;
break;
#endif
case INET_DIAG_BC_AUTO:
case INET_DIAG_BC_JMP:
case INET_DIAG_BC_NOP:
break;
default:
return -EINVAL;
}
if (op->code != INET_DIAG_BC_NOP) {
if (op->no < min_len || op->no > len + 4 || op->no & 3)
return -EINVAL;
if (op->no < len &&
!valid_cc(bytecode, bytecode_len, len - op->no))
return -EINVAL;
}
if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
return -EINVAL;
bc += op->yes;
len -= op->yes;
}
return len == 0 ? 0 : -EINVAL;
}
static void twsk_build_assert(void)
{
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
offsetof(struct sock, sk_family));
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
offsetof(struct inet_sock, inet_num));
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
offsetof(struct inet_sock, inet_dport));
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
offsetof(struct inet_sock, inet_rcv_saddr));
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
offsetof(struct inet_sock, inet_daddr));
#if IS_ENABLED(CONFIG_IPV6)
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
offsetof(struct sock, sk_v6_rcv_saddr));
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
offsetof(struct sock, sk_v6_daddr));
#endif
}
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r)
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct inet_diag_dump_data *cb_data = cb->data;
struct net *net = sock_net(skb->sk);
u32 idiag_states = r->idiag_states;
int i, num, s_i, s_num;
struct nlattr *bc;
struct sock *sk;
bc = cb_data->inet_diag_nla_bc;
if (idiag_states & TCPF_SYN_RECV)
idiag_states |= TCPF_NEW_SYN_RECV;
s_i = cb->args[1];
s_num = num = cb->args[2];
if (cb->args[0] == 0) {
if (!(idiag_states & TCPF_LISTEN) || r->id.idiag_dport)
goto skip_listen_ht;
for (i = s_i; i <= hashinfo->lhash2_mask; i++) {
struct inet_listen_hashbucket *ilb;
struct hlist_nulls_node *node;
num = 0;
ilb = &hashinfo->lhash2[i];
spin_lock(&ilb->lock);
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num) {
num++;
continue;
}
if (r->sdiag_family != AF_UNSPEC &&
sk->sk_family != r->sdiag_family)
goto next_listen;
if (r->id.idiag_sport != inet->inet_sport &&
r->id.idiag_sport)
goto next_listen;
if (!inet_diag_bc_sk(bc, sk))
goto next_listen;
if (inet_sk_diag_fill(sk, inet_csk(sk), skb,
cb, r, NLM_F_MULTI,
net_admin) < 0) {
spin_unlock(&ilb->lock);
goto done;
}
next_listen:
++num;
}
spin_unlock(&ilb->lock);
s_num = 0;
}
skip_listen_ht:
cb->args[0] = 1;
s_i = num = s_num = 0;
}
if (!(idiag_states & ~TCPF_LISTEN))
goto out;
#define SKARR_SZ 16
for (i = s_i; i <= hashinfo->ehash_mask; i++) {
struct inet_ehash_bucket *head = &hashinfo->ehash[i];
spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
struct hlist_nulls_node *node;
struct sock *sk_arr[SKARR_SZ];
int num_arr[SKARR_SZ];
int idx, accum, res;
if (hlist_nulls_empty(&head->chain))
continue;
if (i > s_i)
s_num = 0;
next_chunk:
num = 0;
accum = 0;
spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &head->chain) {
int state;
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num)
goto next_normal;
state = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_substate : sk->sk_state;
if (!(idiag_states & (1 << state)))
goto next_normal;
if (r->sdiag_family != AF_UNSPEC &&
sk->sk_family != r->sdiag_family)
goto next_normal;
if (r->id.idiag_sport != htons(sk->sk_num) &&
r->id.idiag_sport)
goto next_normal;
if (r->id.idiag_dport != sk->sk_dport &&
r->id.idiag_dport)
goto next_normal;
twsk_build_assert();
if (!inet_diag_bc_sk(bc, sk))
goto next_normal;
if (!refcount_inc_not_zero(&sk->sk_refcnt))
goto next_normal;
num_arr[accum] = num;
sk_arr[accum] = sk;
if (++accum == SKARR_SZ)
break;
next_normal:
++num;
}
spin_unlock_bh(lock);
res = 0;
for (idx = 0; idx < accum; idx++) {
if (res >= 0) {
res = sk_diag_fill(sk_arr[idx], skb, cb, r,
NLM_F_MULTI, net_admin);
if (res < 0)
num = num_arr[idx];
}
sock_gen_put(sk_arr[idx]);
}
if (res < 0)
break;
cond_resched();
if (accum == SKARR_SZ) {
s_num = num + 1;
goto next_chunk;
}
}
done:
cb->args[1] = i;
cb->args[2] = num;
out:
;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *r)
{
struct inet_diag_dump_data *cb_data = cb->data;
const struct inet_diag_handler *handler;
u32 prev_min_dump_alloc;
int protocol, err = 0;
protocol = inet_diag_get_protocol(r, cb_data);
again:
prev_min_dump_alloc = cb->min_dump_alloc;
handler = inet_diag_lock_handler(protocol);
if (!IS_ERR(handler))
handler->dump(skb, cb, r);
else
err = PTR_ERR(handler);
inet_diag_unlock_handler(handler);
	/* The skb is not large enough to fit one sk info and
	 * inet_sk_diag_fill() has requested a larger skb.
	 */
if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) {
err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL);
if (!err)
goto again;
}
return err ? : skb->len;
}
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh));
}
static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
{
const struct nlmsghdr *nlh = cb->nlh;
struct inet_diag_dump_data *cb_data;
struct sk_buff *skb = cb->skb;
struct nlattr *nla;
int err;
cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL);
if (!cb_data)
return -ENOMEM;
err = inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas);
if (err) {
kfree(cb_data);
return err;
}
nla = cb_data->inet_diag_nla_bc;
if (nla) {
err = inet_diag_bc_audit(nla, skb);
if (err) {
kfree(cb_data);
return err;
}
}
nla = cb_data->inet_diag_nla_bpf_stgs;
if (nla) {
struct bpf_sk_storage_diag *bpf_stg_diag;
bpf_stg_diag = bpf_sk_storage_diag_alloc(nla);
if (IS_ERR(bpf_stg_diag)) {
kfree(cb_data);
return PTR_ERR(bpf_stg_diag);
}
cb_data->bpf_stg_diag = bpf_stg_diag;
}
cb->data = cb_data;
return 0;
}
static int inet_diag_dump_start(struct netlink_callback *cb)
{
return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2));
}
static int inet_diag_dump_start_compat(struct netlink_callback *cb)
{
return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req));
}
static int inet_diag_dump_done(struct netlink_callback *cb)
{
struct inet_diag_dump_data *cb_data = cb->data;
bpf_sk_storage_diag_free(cb_data->bpf_stg_diag);
kfree(cb->data);
return 0;
}
static int inet_diag_type2proto(int type)
{
switch (type) {
case TCPDIAG_GETSOCK:
return IPPROTO_TCP;
case DCCPDIAG_GETSOCK:
return IPPROTO_DCCP;
default:
return 0;
}
}
static int inet_diag_dump_compat(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct inet_diag_req *rc = nlmsg_data(cb->nlh);
struct inet_diag_req_v2 req;
req.sdiag_family = AF_UNSPEC; /* compatibility */
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
req.idiag_ext = rc->idiag_ext;
req.idiag_states = rc->idiag_states;
req.id = rc->id;
return __inet_diag_dump(skb, cb, &req);
}
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
const struct nlmsghdr *nlh)
{
struct inet_diag_req *rc = nlmsg_data(nlh);
struct inet_diag_req_v2 req;
req.sdiag_family = rc->idiag_family;
req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
req.idiag_ext = rc->idiag_ext;
req.idiag_states = rc->idiag_states;
req.id = rc->id;
return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh,
sizeof(struct inet_diag_req), &req);
}
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int hdrlen = sizeof(struct inet_diag_req);
struct net *net = sock_net(skb->sk);
if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
nlmsg_len(nlh) < hdrlen)
return -EINVAL;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = inet_diag_dump_start_compat,
.done = inet_diag_dump_done,
.dump = inet_diag_dump_compat,
};
return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
}
return inet_diag_get_exact_compat(skb, nlh);
}
static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct inet_diag_req_v2);
struct net *net = sock_net(skb->sk);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = inet_diag_dump_start,
.done = inet_diag_dump_done,
.dump = inet_diag_dump,
};
return netlink_dump_start(net->diag_nlsk, skb, h, &c);
}
return inet_diag_cmd_exact(h->nlmsg_type, skb, h, hdrlen,
nlmsg_data(h));
}
static
int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
{
const struct inet_diag_handler *handler;
struct nlmsghdr *nlh;
struct nlattr *attr;
struct inet_diag_msg *r;
void *info = NULL;
int err = 0;
nlh = nlmsg_put(skb, 0, 0, SOCK_DIAG_BY_FAMILY, sizeof(*r), 0);
if (!nlh)
return -ENOMEM;
r = nlmsg_data(nlh);
memset(r, 0, sizeof(*r));
inet_diag_msg_common_fill(r, sk);
if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_STREAM)
r->id.idiag_sport = inet_sk(sk)->inet_sport;
r->idiag_state = sk->sk_state;
if ((err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))) {
nlmsg_cancel(skb, nlh);
return err;
}
handler = inet_diag_lock_handler(sk->sk_protocol);
if (IS_ERR(handler)) {
inet_diag_unlock_handler(handler);
nlmsg_cancel(skb, nlh);
return PTR_ERR(handler);
}
attr = handler->idiag_info_size
? nla_reserve_64bit(skb, INET_DIAG_INFO,
handler->idiag_info_size,
INET_DIAG_PAD)
: NULL;
if (attr)
info = nla_data(attr);
handler->idiag_get_info(sk, r, info);
inet_diag_unlock_handler(handler);
nlmsg_end(skb, nlh);
return 0;
}
static const struct sock_diag_handler inet_diag_handler = {
.family = AF_INET,
.dump = inet_diag_handler_cmd,
.get_info = inet_diag_handler_get_info,
.destroy = inet_diag_handler_cmd,
};
static const struct sock_diag_handler inet6_diag_handler = {
.family = AF_INET6,
.dump = inet_diag_handler_cmd,
.get_info = inet_diag_handler_get_info,
.destroy = inet_diag_handler_cmd,
};
int inet_diag_register(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
int err = -EINVAL;
if (type >= IPPROTO_MAX)
goto out;
mutex_lock(&inet_diag_table_mutex);
err = -EEXIST;
if (!inet_diag_table[type]) {
inet_diag_table[type] = h;
err = 0;
}
mutex_unlock(&inet_diag_table_mutex);
out:
return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);
void inet_diag_unregister(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
if (type >= IPPROTO_MAX)
return;
mutex_lock(&inet_diag_table_mutex);
inet_diag_table[type] = NULL;
mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);
static int __init inet_diag_init(void)
{
const int inet_diag_table_size = (IPPROTO_MAX *
sizeof(struct inet_diag_handler *));
int err = -ENOMEM;
inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
if (!inet_diag_table)
goto out;
err = sock_diag_register(&inet_diag_handler);
if (err)
goto out_free_nl;
err = sock_diag_register(&inet6_diag_handler);
if (err)
goto out_free_inet;
sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
return err;
out_free_inet:
sock_diag_unregister(&inet_diag_handler);
out_free_nl:
kfree(inet_diag_table);
goto out;
}
static void __exit inet_diag_exit(void)
{
sock_diag_unregister(&inet6_diag_handler);
sock_diag_unregister(&inet_diag_handler);
sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
kfree(inet_diag_table);
}
module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);
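/*
 * Editorial illustration (not part of the kernel source): a minimal,
 * hedged user-space sketch of the dump interface implemented above.  It
 * sends a SOCK_DIAG_BY_FAMILY dump request over NETLINK_SOCK_DIAG asking
 * for all IPv4 TCP sockets, which the kernel answers via
 * inet_diag_dump_icsk().  The function name is hypothetical; attribute
 * parsing and error handling are intentionally minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

int dump_ipv4_tcp_sockets(void)
{
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family   = AF_INET,
			.sdiag_protocol = IPPROTO_TCP,
			.idiag_states   = ~0U,	/* all TCP states */
		},
	};
	struct inet_diag_msg *r;
	struct nlmsghdr *h;
	char buf[8192];
	int fd, n;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return -1;
	if (sendto(fd, &msg, sizeof(msg), 0,
		   (struct sockaddr *)&nladdr, sizeof(nladdr)) < 0) {
		close(fd);
		return -1;
	}
	/* Each reply carries one struct inet_diag_msg per socket. */
	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
		for (h = (struct nlmsghdr *)buf; NLMSG_OK(h, n);
		     h = NLMSG_NEXT(h, n)) {
			if (h->nlmsg_type == NLMSG_DONE ||
			    h->nlmsg_type == NLMSG_ERROR)
				goto out;
			r = NLMSG_DATA(h);
			printf("sport %u dport %u state %u\n",
			       ntohs(r->id.idiag_sport),
			       ntohs(r->id.idiag_dport),
			       r->idiag_state);
		}
	}
out:
	close(fd);
	return 0;
}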
| linux-master | net/ipv4/inet_diag.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Generic INET transport hashtables
*
* Authors: Lotsa people, from code originally in tcp
*/
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
u32 inet_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
static u32 inet_ehash_secret __read_mostly;
net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
inet_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet_ehashfn);
/* This function handles inet_sock, but also timewait and request sockets
* for IPv4/IPv6.
*/
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6 &&
!ipv6_addr_v4mapped(&sk->sk_v6_daddr))
return inet6_ehashfn(sock_net(sk),
&sk->sk_v6_rcv_saddr, sk->sk_num,
&sk->sk_v6_daddr, sk->sk_dport);
#endif
return inet_ehashfn(sock_net(sk),
sk->sk_rcv_saddr, sk->sk_num,
sk->sk_daddr, sk->sk_dport);
}
/*
* Allocate and initialize a new local port bind bucket.
 * The bind hash bucket lock for snum's hash chain must be held here.
*/
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
struct net *net,
struct inet_bind_hashbucket *head,
const unsigned short snum,
int l3mdev)
{
struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
if (tb) {
write_pnet(&tb->ib_net, net);
tb->l3mdev = l3mdev;
tb->port = snum;
tb->fastreuse = 0;
tb->fastreuseport = 0;
INIT_HLIST_HEAD(&tb->owners);
hlist_add_head(&tb->node, &head->chain);
}
return tb;
}
/*
* Caller must hold hashbucket lock for this tb with local BH disabled
*/
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
if (hlist_empty(&tb->owners)) {
__hlist_del(&tb->node);
kmem_cache_free(cachep, tb);
}
}
bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
unsigned short port, int l3mdev)
{
return net_eq(ib_net(tb), net) && tb->port == port &&
tb->l3mdev == l3mdev;
}
static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb,
struct net *net,
struct inet_bind_hashbucket *head,
unsigned short port, int l3mdev,
const struct sock *sk)
{
write_pnet(&tb->ib_net, net);
tb->l3mdev = l3mdev;
tb->port = port;
#if IS_ENABLED(CONFIG_IPV6)
tb->family = sk->sk_family;
if (sk->sk_family == AF_INET6)
tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
else
#endif
tb->rcv_saddr = sk->sk_rcv_saddr;
INIT_HLIST_HEAD(&tb->owners);
INIT_HLIST_HEAD(&tb->deathrow);
hlist_add_head(&tb->node, &head->chain);
}
struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
struct net *net,
struct inet_bind_hashbucket *head,
unsigned short port,
int l3mdev,
const struct sock *sk)
{
struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
if (tb)
inet_bind2_bucket_init(tb, net, head, port, l3mdev, sk);
return tb;
}
/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
if (hlist_empty(&tb->owners) && hlist_empty(&tb->deathrow)) {
__hlist_del(&tb->node);
kmem_cache_free(cachep, tb);
}
}
static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb2->family)
return false;
if (sk->sk_family == AF_INET6)
return ipv6_addr_equal(&tb2->v6_rcv_saddr,
&sk->sk_v6_rcv_saddr);
#endif
return tb2->rcv_saddr == sk->sk_rcv_saddr;
}
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
struct inet_bind2_bucket *tb2, unsigned short port)
{
inet_sk(sk)->inet_num = port;
sk_add_bind_node(sk, &tb->owners);
inet_csk(sk)->icsk_bind_hash = tb;
sk_add_bind2_node(sk, &tb2->owners);
inet_csk(sk)->icsk_bind2_hash = tb2;
}
/*
* Get rid of any references to a local port held by the given sock.
*/
static void __inet_put_port(struct sock *sk)
{
struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
struct inet_bind_hashbucket *head, *head2;
struct net *net = sock_net(sk);
struct inet_bind_bucket *tb;
int bhash;
bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
head = &hashinfo->bhash[bhash];
head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);
spin_lock(&head->lock);
tb = inet_csk(sk)->icsk_bind_hash;
__sk_del_bind_node(sk);
inet_csk(sk)->icsk_bind_hash = NULL;
inet_sk(sk)->inet_num = 0;
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
spin_lock(&head2->lock);
if (inet_csk(sk)->icsk_bind2_hash) {
struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;
__sk_del_bind2_node(sk);
inet_csk(sk)->icsk_bind2_hash = NULL;
inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
}
spin_unlock(&head2->lock);
spin_unlock(&head->lock);
}
void inet_put_port(struct sock *sk)
{
local_bh_disable();
__inet_put_port(sk);
local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);
int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk);
unsigned short port = inet_sk(child)->inet_num;
struct inet_bind_hashbucket *head, *head2;
bool created_inet_bind_bucket = false;
struct net *net = sock_net(sk);
bool update_fastreuse = false;
struct inet_bind2_bucket *tb2;
struct inet_bind_bucket *tb;
int bhash, l3mdev;
bhash = inet_bhashfn(net, port, table->bhash_size);
head = &table->bhash[bhash];
head2 = inet_bhashfn_portaddr(table, child, net, port);
spin_lock(&head->lock);
spin_lock(&head2->lock);
tb = inet_csk(sk)->icsk_bind_hash;
tb2 = inet_csk(sk)->icsk_bind2_hash;
if (unlikely(!tb || !tb2)) {
spin_unlock(&head2->lock);
spin_unlock(&head->lock);
return -ENOENT;
}
if (tb->port != port) {
l3mdev = inet_sk_bound_l3mdev(sk);
/* NOTE: using tproxy and redirecting skbs to a proxy
* on a different listener port breaks the assumption
* that the listener socket's icsk_bind_hash is the same
* as that of the child socket. We have to look up or
* create a new bind bucket for the child here. */
inet_bind_bucket_for_each(tb, &head->chain) {
if (inet_bind_bucket_match(tb, net, port, l3mdev))
break;
}
if (!tb) {
tb = inet_bind_bucket_create(table->bind_bucket_cachep,
net, head, port, l3mdev);
if (!tb) {
spin_unlock(&head2->lock);
spin_unlock(&head->lock);
return -ENOMEM;
}
created_inet_bind_bucket = true;
}
update_fastreuse = true;
goto bhash2_find;
} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
l3mdev = inet_sk_bound_l3mdev(sk);
bhash2_find:
tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
if (!tb2) {
tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
net, head2, port,
l3mdev, child);
if (!tb2)
goto error;
}
}
if (update_fastreuse)
inet_csk_update_fastreuse(tb, child);
inet_bind_hash(child, tb, tb2, port);
spin_unlock(&head2->lock);
spin_unlock(&head->lock);
return 0;
error:
if (created_inet_bind_bucket)
inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
spin_unlock(&head2->lock);
spin_unlock(&head->lock);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
u32 hash;
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
hash = ipv6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
else
#endif
hash = ipv4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_num);
return inet_lhash2_bucket(h, hash);
}
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum, const __be32 daddr,
const int dif, const int sdif)
{
int score = -1;
if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
!ipv6_only_sock(sk)) {
if (sk->sk_rcv_saddr != daddr)
return -1;
if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
return -1;
score = sk->sk_bound_dev_if ? 2 : 1;
if (sk->sk_family == PF_INET)
score++;
if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
}
return score;
}
/**
* inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary.
* @net: network namespace.
* @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP.
* @skb: context for a potential SK_REUSEPORT program.
* @doff: header offset.
* @saddr: source address.
* @sport: source port.
* @daddr: destination address.
* @hnum: destination port in host byte order.
* @ehashfn: hash function used to generate the fallback hash.
*
* Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
* the selected sock or an error.
*/
struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum,
inet_ehashfn_t *ehashfn)
{
struct sock *reuse_sk = NULL;
u32 phash;
if (sk->sk_reuseport) {
phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
net, daddr, hnum, saddr, sport);
reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
}
return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet_lookup_reuseport);
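/*
 * Editorial illustration (not part of the kernel source): a minimal,
 * hedged user-space sketch of the SO_REUSEPORT usage that feeds the
 * inet_lookup_reuseport() path above.  Two TCP listeners bind the same
 * local port; incoming connections are then distributed across the
 * reuseport group.  Function names are hypothetical and error handling
 * is intentionally minimal.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int reuseport_listener(unsigned short port)
{
	struct sockaddr_in addr;
	int one = 1, fd;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;
	/* Must be set before bind() so the reuseport group can be formed. */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
		goto err;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}

/* Both binds succeed because every socket in the group sets SO_REUSEPORT
 * (and, per inet_bind_conflict(), shares the same effective UID).
 */
int make_reuseport_pair(int fds[2], unsigned short port)
{
	fds[0] = reuseport_listener(port);
	fds[1] = reuseport_listener(port);
	return (fds[0] < 0 || fds[1] < 0) ? -1 : 0;
}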
/*
 * There are some nice properties to exploit here. The BSD API
* does not allow a listening sock to specify the remote port nor the
* remote address for the connection. So always assume those are both
* wildcarded during the search since they can never be otherwise.
*/
/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(struct net *net,
struct inet_listen_hashbucket *ilb2,
struct sk_buff *skb, int doff,
const __be32 saddr, __be16 sport,
const __be32 daddr, const unsigned short hnum,
const int dif, const int sdif)
{
struct sock *sk, *result = NULL;
struct hlist_nulls_node *node;
int score, hiscore = 0;
sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
score = compute_score(sk, net, hnum, daddr, dif, sdif);
if (score > hiscore) {
result = inet_lookup_reuseport(net, sk, skb, doff,
saddr, sport, daddr, hnum, inet_ehashfn);
if (result)
return result;
result = sk;
hiscore = score;
}
}
return result;
}
struct sock *inet_lookup_run_sk_lookup(struct net *net,
int protocol,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, u16 hnum, const int dif,
inet_ehashfn_t *ehashfn)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport,
daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
ehashfn);
if (reuse_sk)
sk = reuse_sk;
return sk;
}
struct sock *__inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, __be16 sport,
const __be32 daddr, const unsigned short hnum,
const int dif, const int sdif)
{
struct inet_listen_hashbucket *ilb2;
struct sock *result = NULL;
unsigned int hash2;
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
hashinfo == net->ipv4.tcp_death_row.hashinfo) {
result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
saddr, sport, daddr, hnum, dif,
inet_ehashfn);
if (result)
goto done;
}
hash2 = ipv4_portaddr_hash(net, daddr, hnum);
ilb2 = inet_lhash2_bucket(hashinfo, hash2);
result = inet_lhash2_lookup(net, ilb2, skb, doff,
saddr, sport, daddr, hnum,
dif, sdif);
if (result)
goto done;
/* Lookup lhash2 with INADDR_ANY */
hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
ilb2 = inet_lhash2_bucket(hashinfo, hash2);
result = inet_lhash2_lookup(net, ilb2, skb, doff,
saddr, sport, htonl(INADDR_ANY), hnum,
dif, sdif);
done:
if (IS_ERR(result))
return NULL;
return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
if (!refcount_dec_and_test(&sk->sk_refcnt))
return;
if (sk->sk_state == TCP_TIME_WAIT)
inet_twsk_free(inet_twsk(sk));
else if (sk->sk_state == TCP_NEW_SYN_RECV)
reqsk_free(inet_reqsk(sk));
else
sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);
void sock_edemux(struct sk_buff *skb)
{
sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);
struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
const int dif, const int sdif)
{
INET_ADDR_COOKIE(acookie, saddr, daddr);
const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
struct sock *sk;
const struct hlist_nulls_node *node;
	/* Optimize here for the direct hit case; only listening sockets can
	 * have wildcards anyway.
*/
unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
unsigned int slot = hash & hashinfo->ehash_mask;
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
begin:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_hash != hash)
continue;
if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
goto out;
if (unlikely(!inet_match(net, sk, acookie,
ports, dif, sdif))) {
sock_gen_put(sk);
goto begin;
}
goto found;
}
}
/*
* if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably encountered an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
out:
sk = NULL;
found:
return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
struct sock *sk, __u16 lport,
struct inet_timewait_sock **twp)
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
struct inet_sock *inet = inet_sk(sk);
__be32 daddr = inet->inet_rcv_saddr;
__be32 saddr = inet->inet_daddr;
int dif = sk->sk_bound_dev_if;
struct net *net = sock_net(sk);
int sdif = l3mdev_master_ifindex_by_index(net, dif);
INET_ADDR_COOKIE(acookie, saddr, daddr);
const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
unsigned int hash = inet_ehashfn(net, daddr, lport,
saddr, inet->inet_dport);
struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
struct sock *sk2;
const struct hlist_nulls_node *node;
struct inet_timewait_sock *tw = NULL;
spin_lock(lock);
sk_nulls_for_each(sk2, node, &head->chain) {
if (sk2->sk_hash != hash)
continue;
if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
if (sk2->sk_state == TCP_TIME_WAIT) {
tw = inet_twsk(sk2);
if (twsk_unique(sk, sk2, twp))
break;
}
goto not_unique;
}
}
	/* Must record num and sport now. Otherwise we will see
	 * a socket in the hash table with a funny identity.
	 */
inet->inet_num = lport;
inet->inet_sport = htons(lport);
sk->sk_hash = hash;
WARN_ON(!sk_unhashed(sk));
__sk_nulls_add_node_rcu(sk, &head->chain);
if (tw) {
sk_nulls_del_node_init_rcu((struct sock *)tw);
__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
}
spin_unlock(lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
if (twp) {
*twp = tw;
} else if (tw) {
/* Silly. Should hash-dance instead... */
inet_twsk_deschedule_put(tw);
}
return 0;
not_unique:
spin_unlock(lock);
return -EADDRNOTAVAIL;
}
static u64 inet_sk_port_offset(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
inet->inet_daddr,
inet->inet_dport);
}
/* Searches for an existing socket in the ehash bucket list.
* Returns true if found, false otherwise.
*/
static bool inet_ehash_lookup_by_sk(struct sock *sk,
struct hlist_nulls_head *list)
{
const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
const int sdif = sk->sk_bound_dev_if;
const int dif = sk->sk_bound_dev_if;
const struct hlist_nulls_node *node;
struct net *net = sock_net(sk);
struct sock *esk;
INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);
sk_nulls_for_each_rcu(esk, node, list) {
if (esk->sk_hash != sk->sk_hash)
continue;
if (sk->sk_family == AF_INET) {
if (unlikely(inet_match(net, esk, acookie,
ports, dif, sdif))) {
return true;
}
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
if (unlikely(inet6_match(net, esk,
&sk->sk_v6_daddr,
&sk->sk_v6_rcv_saddr,
ports, dif, sdif))) {
return true;
}
}
#endif
}
return false;
}
/* Insert a socket into ehash, possibly removing another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a matching socket already exists, sk is not inserted
 * and *found_dup_sk is set to true.
*/
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
struct inet_ehash_bucket *head;
struct hlist_nulls_head *list;
spinlock_t *lock;
bool ret = true;
WARN_ON_ONCE(!sk_unhashed(sk));
sk->sk_hash = sk_ehashfn(sk);
head = inet_ehash_bucket(hashinfo, sk->sk_hash);
list = &head->chain;
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
spin_lock(lock);
if (osk) {
WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
ret = sk_nulls_del_node_init_rcu(osk);
} else if (found_dup_sk) {
*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
if (*found_dup_sk)
ret = false;
}
if (ret)
__sk_nulls_add_node_rcu(sk, list);
spin_unlock(lock);
return ret;
}
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
bool ok = inet_ehash_insert(sk, osk, found_dup_sk);
if (ok) {
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
} else {
this_cpu_inc(*sk->sk_prot->orphan_count);
inet_sk_set_state(sk, TCP_CLOSE);
sock_set_flag(sk, SOCK_DEAD);
inet_csk_destroy_sock(sk);
}
return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
static int inet_reuseport_add_sock(struct sock *sk,
struct inet_listen_hashbucket *ilb)
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
const struct hlist_nulls_node *node;
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
if (sk2 != sk &&
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
inet_csk(sk2)->icsk_bind_hash == tb &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false))
return reuseport_add_sock(sk, sk2,
inet_rcv_saddr_any(sk));
}
return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
int __inet_hash(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
struct inet_listen_hashbucket *ilb2;
int err = 0;
if (sk->sk_state != TCP_LISTEN) {
local_bh_disable();
inet_ehash_nolisten(sk, osk, NULL);
local_bh_enable();
return 0;
}
WARN_ON(!sk_unhashed(sk));
ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
spin_lock(&ilb2->lock);
if (sk->sk_reuseport) {
err = inet_reuseport_add_sock(sk, ilb2);
if (err)
goto unlock;
}
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk->sk_family == AF_INET6)
__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
else
__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
sock_set_flag(sk, SOCK_RCU_FREE);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
spin_unlock(&ilb2->lock);
return err;
}
EXPORT_SYMBOL(__inet_hash);
int inet_hash(struct sock *sk)
{
int err = 0;
if (sk->sk_state != TCP_CLOSE)
err = __inet_hash(sk, NULL);
return err;
}
EXPORT_SYMBOL_GPL(inet_hash);
void inet_unhash(struct sock *sk)
{
struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
if (sk_unhashed(sk))
return;
if (sk->sk_state == TCP_LISTEN) {
struct inet_listen_hashbucket *ilb2;
ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
/* Don't disable bottom halves while acquiring the lock to
* avoid circular locking dependency on PREEMPT_RT.
*/
spin_lock(&ilb2->lock);
if (sk_unhashed(sk)) {
spin_unlock(&ilb2->lock);
return;
}
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_stop_listen_sock(sk);
__sk_nulls_del_node_init_rcu(sk);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock(&ilb2->lock);
} else {
spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
spin_lock_bh(lock);
if (sk_unhashed(sk)) {
spin_unlock_bh(lock);
return;
}
__sk_nulls_del_node_init_rcu(sk);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock_bh(lock);
}
}
EXPORT_SYMBOL_GPL(inet_unhash);
static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
const struct net *net, unsigned short port,
int l3mdev, const struct sock *sk)
{
if (!net_eq(ib2_net(tb), net) || tb->port != port ||
tb->l3mdev != l3mdev)
return false;
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb->family) {
if (sk->sk_family == AF_INET)
return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
return false;
}
if (sk->sk_family == AF_INET6)
return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
#endif
return tb->rcv_saddr == sk->sk_rcv_saddr;
}
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
unsigned short port, int l3mdev, const struct sock *sk)
{
if (!net_eq(ib2_net(tb), net) || tb->port != port ||
tb->l3mdev != l3mdev)
return false;
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb->family) {
if (sk->sk_family == AF_INET)
return ipv6_addr_any(&tb->v6_rcv_saddr) ||
ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);
return false;
}
if (sk->sk_family == AF_INET6)
return ipv6_addr_any(&tb->v6_rcv_saddr);
#endif
return tb->rcv_saddr == 0;
}
/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
unsigned short port, int l3mdev, const struct sock *sk)
{
struct inet_bind2_bucket *bhash2 = NULL;
inet_bind_bucket_for_each(bhash2, &head->chain)
if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
break;
return bhash2;
}
struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
u32 hash;
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
hash = ipv6_portaddr_hash(net, &in6addr_any, port);
else
#endif
hash = ipv4_portaddr_hash(net, 0, port);
return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}
static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
if (family == AF_INET) {
inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
}
#if IS_ENABLED(CONFIG_IPV6)
else {
sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
}
#endif
}
static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
struct inet_bind_hashbucket *head, *head2;
struct inet_bind2_bucket *tb2, *new_tb2;
int l3mdev = inet_sk_bound_l3mdev(sk);
int port = inet_sk(sk)->inet_num;
struct net *net = sock_net(sk);
int bhash;
if (!inet_csk(sk)->icsk_bind2_hash) {
/* Not bind()ed before. */
if (reset)
inet_reset_saddr(sk);
else
inet_update_saddr(sk, saddr, family);
return 0;
}
/* Allocate a bind2 bucket ahead of time to avoid permanently putting
* the bhash2 table in an inconsistent state if a new tb2 bucket
* allocation fails.
*/
new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
if (!new_tb2) {
if (reset) {
/* The (INADDR_ANY, port) bucket might have already
* been freed, then we cannot fixup icsk_bind2_hash,
* so we give up and unlink sk from bhash/bhash2 not
* to leave inconsistency in bhash2.
*/
inet_put_port(sk);
inet_reset_saddr(sk);
}
return -ENOMEM;
}
bhash = inet_bhashfn(net, port, hinfo->bhash_size);
head = &hinfo->bhash[bhash];
head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
/* If we change saddr locklessly, another thread
	 * iterating over bhash might see a corrupted address.
*/
spin_lock_bh(&head->lock);
spin_lock(&head2->lock);
__sk_del_bind2_node(sk);
inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
spin_unlock(&head2->lock);
if (reset)
inet_reset_saddr(sk);
else
inet_update_saddr(sk, saddr, family);
head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
spin_lock(&head2->lock);
tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
if (!tb2) {
tb2 = new_tb2;
inet_bind2_bucket_init(tb2, net, head2, port, l3mdev, sk);
}
sk_add_bind2_node(sk, &tb2->owners);
inet_csk(sk)->icsk_bind2_hash = tb2;
spin_unlock(&head2->lock);
spin_unlock_bh(&head->lock);
if (tb2 != new_tb2)
kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);
return 0;
}
int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);
void inet_bhash2_reset_saddr(struct sock *sk)
{
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);
/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by a clever attacker.
 *
 * The RFC claims that TABLE_LENGTH=10 buckets gives an improvement, but
 * attacks against that size have since been demonstrated, so we use
 * 65536 buckets by default instead, giving much better isolation and
 * privacy at the expense of 256kB of kernel memory.
 */
#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u64 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **))
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
struct inet_bind_hashbucket *head, *head2;
struct inet_timewait_sock *tw = NULL;
int port = inet_sk(sk)->inet_num;
struct net *net = sock_net(sk);
struct inet_bind2_bucket *tb2;
struct inet_bind_bucket *tb;
bool tb_created = false;
u32 remaining, offset;
int ret, i, low, high;
int l3mdev;
u32 index;
if (port) {
local_bh_disable();
ret = check_established(death_row, sk, port, NULL);
local_bh_enable();
return ret;
}
l3mdev = inet_sk_bound_l3mdev(sk);
inet_sk_get_local_port_range(sk, &low, &high);
high++; /* [32768, 60999] -> [32768, 61000[ */
remaining = high - low;
if (likely(remaining > 1))
remaining &= ~1U;
get_random_sleepable_once(table_perturb,
INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
offset %= remaining;
/* In first pass we try ports of @low parity.
* inet_csk_get_port() does the opposite choice.
*/
offset &= ~1U;
other_parity_scan:
port = low + offset;
for (i = 0; i < remaining; i += 2, port += 2) {
if (unlikely(port >= high))
port -= remaining;
if (inet_is_local_reserved_port(net, port))
continue;
head = &hinfo->bhash[inet_bhashfn(net, port,
hinfo->bhash_size)];
spin_lock_bh(&head->lock);
/* Does not bother with rcv_saddr checks, because
* the established check is already unique enough.
*/
inet_bind_bucket_for_each(tb, &head->chain) {
if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
if (tb->fastreuse >= 0 ||
tb->fastreuseport >= 0)
goto next_port;
WARN_ON(hlist_empty(&tb->owners));
if (!check_established(death_row, sk,
port, &tw))
goto ok;
goto next_port;
}
}
tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
net, head, port, l3mdev);
if (!tb) {
spin_unlock_bh(&head->lock);
return -ENOMEM;
}
tb_created = true;
tb->fastreuse = -1;
tb->fastreuseport = -1;
goto ok;
next_port:
spin_unlock_bh(&head->lock);
cond_resched();
}
offset++;
if ((offset & 1) && remaining > 1)
goto other_parity_scan;
return -EADDRNOTAVAIL;
ok:
/* Find the corresponding tb2 bucket since we need to
* add the socket to the bhash2 table as well
*/
head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
spin_lock(&head2->lock);
tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
if (!tb2) {
tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
head2, port, l3mdev, sk);
if (!tb2)
goto error;
}
/* Here we want to add a little bit of randomness to the next source
	 * port that will be chosen. We use a max() with a random value here so
	 * that under low contention the randomness is maximal and under high
	 * contention it may be nonexistent.
*/
i = max_t(int, i, get_random_u32_below(8) * 2);
WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
inet_bind_hash(sk, tb, tb2, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
}
if (tw)
inet_twsk_bind_unhash(tw, hinfo);
spin_unlock(&head2->lock);
spin_unlock(&head->lock);
if (tw)
inet_twsk_deschedule_put(tw);
local_bh_enable();
return 0;
error:
spin_unlock(&head2->lock);
if (tb_created)
inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
spin_unlock_bh(&head->lock);
return -ENOMEM;
}
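/*
 * Editorial illustration (not part of the kernel source): a simplified,
 * self-contained model of the RFC 6056 Algorithm 4 ("double hash") port
 * selection performed by __inet_hash_connect() above.  hash_f() and
 * hash_g() are hypothetical placeholders for the kernel's secret-keyed
 * hashes (the port_offset from secure_ipv4_port_ephemeral() and the
 * table_perturb index); the table size is shrunk for illustration.
 */
#include <stdint.h>

#define DEMO_TABLE_SIZE 256		/* kernel default is 1 << 16 */

static uint32_t demo_perturb[DEMO_TABLE_SIZE];

/* Placeholder keyed hashes over the (saddr, daddr, dport) tuple. */
static uint32_t hash_f(uint32_t saddr, uint32_t daddr, uint16_t dport)
{
	return (saddr ^ daddr) * 2654435761u + dport;
}

static uint32_t hash_g(uint32_t saddr, uint32_t daddr, uint16_t dport)
{
	return (saddr + 3 * daddr) * 2246822519u + dport;
}

/* Pick the next candidate ephemeral port in [low, high) for this tuple
 * (low < high assumed) and bump the per-bucket counter afterwards, the
 * way table_perturb[index] is advanced above.
 */
static uint16_t demo_pick_port(uint32_t saddr, uint32_t daddr, uint16_t dport,
			       uint16_t low, uint16_t high)
{
	uint32_t range = (uint32_t)high - low;
	uint32_t index = hash_g(saddr, daddr, dport) % DEMO_TABLE_SIZE;
	uint32_t offset = hash_f(saddr, daddr, dport) + demo_perturb[index];

	demo_perturb[index] += 2;
	return low + (offset % range);
}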
/*
* Bind a port for a connect operation and hash it.
*/
int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk)
{
u64 port_offset = 0;
if (!inet_sk(sk)->inet_num)
port_offset = inet_sk_port_offset(sk);
return __inet_hash_connect(death_row, sk, port_offset,
__inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
int i;
for (i = 0; i <= h->lhash2_mask; i++) {
spin_lock_init(&h->lhash2[i].lock);
INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
i + LISTENING_NULLS_BASE);
}
}
void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit)
{
h->lhash2 = alloc_large_system_hash(name,
sizeof(*h->lhash2),
numentries,
scale,
0,
NULL,
&h->lhash2_mask,
low_limit,
high_limit);
init_hashinfo_lhash2(h);
/* this one is used for source ports of outgoing connections */
table_perturb = alloc_large_system_hash("Table-perturb",
sizeof(*table_perturb),
INET_TABLE_PERTURB_SIZE,
0, 0, NULL, NULL,
INET_TABLE_PERTURB_SIZE,
INET_TABLE_PERTURB_SIZE);
}
int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
if (!h->lhash2)
return -ENOMEM;
h->lhash2_mask = INET_LHTABLE_SIZE - 1;
/* INET_LHTABLE_SIZE must be a power of 2 */
BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);
init_hashinfo_lhash2(h);
return 0;
}
EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
unsigned int locksz = sizeof(spinlock_t);
unsigned int i, nblocks = 1;
if (locksz != 0) {
/* allocate 2 cache lines or at least one spinlock per cpu */
nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
/* no more locks than number of hash buckets */
nblocks = min(nblocks, hashinfo->ehash_mask + 1);
hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
if (!hashinfo->ehash_locks)
return -ENOMEM;
for (i = 0; i < nblocks; i++)
spin_lock_init(&hashinfo->ehash_locks[i]);
}
hashinfo->ehash_locks_mask = nblocks - 1;
return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
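/*
 * Editorial illustration (not part of the kernel source): the lock
 * striping pattern used by inet_ehash_locks_alloc() above, modelled in
 * user space with pthread mutexes.  A power-of-two array of locks is
 * sized from the CPU count, capped by the number of hash buckets, and a
 * bucket's lock is then selected with "hash & mask".  Names are
 * hypothetical; nbuckets_pow2 is assumed to be a power of two.
 */
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

struct striped_locks {
	pthread_mutex_t *locks;
	unsigned int mask;	/* nlocks - 1, nlocks is a power of two */
};

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int striped_locks_init(struct striped_locks *s, unsigned int nbuckets_pow2)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	unsigned int n, i;

	n = roundup_pow2(ncpus > 0 ? 4 * (unsigned int)ncpus : 4);
	while (n > nbuckets_pow2 && n > 1)	/* no more locks than buckets */
		n >>= 1;

	s->locks = calloc(n, sizeof(*s->locks));
	if (!s->locks)
		return -1;
	for (i = 0; i < n; i++)
		pthread_mutex_init(&s->locks[i], NULL);
	s->mask = n - 1;
	return 0;
}

static pthread_mutex_t *bucket_lock(struct striped_locks *s, unsigned int hash)
{
	return &s->locks[hash & s->mask];
}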
struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
unsigned int ehash_entries)
{
struct inet_hashinfo *new_hashinfo;
int i;
new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
if (!new_hashinfo)
goto err;
new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
GFP_KERNEL_ACCOUNT);
if (!new_hashinfo->ehash)
goto free_hashinfo;
new_hashinfo->ehash_mask = ehash_entries - 1;
if (inet_ehash_locks_alloc(new_hashinfo))
goto free_ehash;
for (i = 0; i < ehash_entries; i++)
INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);
new_hashinfo->pernet = true;
return new_hashinfo;
free_ehash:
vfree(new_hashinfo->ehash);
free_hashinfo:
kfree(new_hashinfo);
err:
return NULL;
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc);
void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
{
if (!hashinfo->pernet)
return;
inet_ehash_locks_free(hashinfo);
vfree(hashinfo->ehash);
kfree(hashinfo);
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free);
| linux-master | net/ipv4/inet_hashtables.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Support for INET connection oriented protocols.
*
* Authors: See the TCP sources
*/
#include <linux/module.h>
#include <linux/jhash.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses
* if IPv6 only, and any IPv4 addresses
* if not IPv6 only
* match_sk*_wildcard == false: addresses must be exactly the same, i.e.
* IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
* and 0.0.0.0 equals to 0.0.0.0 only
*/
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
const struct in6_addr *sk2_rcv_saddr6,
__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
bool sk1_ipv6only, bool sk2_ipv6only,
bool match_sk1_wildcard,
bool match_sk2_wildcard)
{
int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
/* if both are mapped, treat as IPv4 */
if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
if (!sk2_ipv6only) {
if (sk1_rcv_saddr == sk2_rcv_saddr)
return true;
return (match_sk1_wildcard && !sk1_rcv_saddr) ||
(match_sk2_wildcard && !sk2_rcv_saddr);
}
return false;
}
if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
return true;
if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
return true;
if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
!(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
return true;
if (sk2_rcv_saddr6 &&
ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
return true;
return false;
}
#endif
/* match_sk*_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
* match_sk*_wildcard == false: addresses must be exactly the same, i.e.
* 0.0.0.0 only equals to 0.0.0.0
*/
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
bool sk2_ipv6only, bool match_sk1_wildcard,
bool match_sk2_wildcard)
{
if (!sk2_ipv6only) {
if (sk1_rcv_saddr == sk2_rcv_saddr)
return true;
return (match_sk1_wildcard && !sk1_rcv_saddr) ||
(match_sk2_wildcard && !sk2_rcv_saddr);
}
return false;
}
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
inet6_rcv_saddr(sk2),
sk->sk_rcv_saddr,
sk2->sk_rcv_saddr,
ipv6_only_sock(sk),
ipv6_only_sock(sk2),
match_wildcard,
match_wildcard);
#endif
return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
ipv6_only_sock(sk2), match_wildcard,
match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);
bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
return !sk->sk_rcv_saddr;
}
void inet_get_local_port_range(const struct net *net, int *low, int *high)
{
unsigned int seq;
do {
seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
*low = net->ipv4.ip_local_ports.range[0];
*high = net->ipv4.ip_local_ports.range[1];
} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{
const struct inet_sock *inet = inet_sk(sk);
const struct net *net = sock_net(sk);
int lo, hi, sk_lo, sk_hi;
inet_get_local_port_range(net, &lo, &hi);
sk_lo = inet->local_port_range.lo;
sk_hi = inet->local_port_range.hi;
if (unlikely(lo <= sk_lo && sk_lo <= hi))
lo = sk_lo;
if (unlikely(lo <= sk_hi && sk_hi <= hi))
hi = sk_hi;
*low = lo;
*high = hi;
}
EXPORT_SYMBOL(inet_sk_get_local_port_range);
static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6) {
int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
return addr_type != IPV6_ADDR_ANY &&
addr_type != IPV6_ADDR_MAPPED;
}
#endif
return sk->sk_rcv_saddr != htonl(INADDR_ANY);
}
static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
kuid_t sk_uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
int bound_dev_if2;
if (sk == sk2)
return false;
bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
sk->sk_bound_dev_if == bound_dev_if2) {
if (sk->sk_reuse && sk2->sk_reuse &&
sk2->sk_state != TCP_LISTEN) {
if (!relax || (!reuseport_ok && sk->sk_reuseport &&
sk2->sk_reuseport && reuseport_cb_ok &&
(sk2->sk_state == TCP_TIME_WAIT ||
uid_eq(sk_uid, sock_i_uid(sk2)))))
return true;
} else if (!reuseport_ok || !sk->sk_reuseport ||
!sk2->sk_reuseport || !reuseport_cb_ok ||
(sk2->sk_state != TCP_TIME_WAIT &&
!uid_eq(sk_uid, sock_i_uid(sk2)))) {
return true;
}
}
return false;
}
static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
kuid_t sk_uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
return false;
return inet_bind_conflict(sk, sk2, sk_uid, relax,
reuseport_cb_ok, reuseport_ok);
}
static bool inet_bhash2_conflict(const struct sock *sk,
const struct inet_bind2_bucket *tb2,
kuid_t sk_uid,
bool relax, bool reuseport_cb_ok,
bool reuseport_ok)
{
struct inet_timewait_sock *tw2;
struct sock *sk2;
sk_for_each_bound_bhash2(sk2, &tb2->owners) {
if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
reuseport_cb_ok, reuseport_ok))
return true;
}
twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
sk2 = (struct sock *)tw2;
if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
reuseport_cb_ok, reuseport_ok))
return true;
}
return false;
}
/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb,
const struct inet_bind2_bucket *tb2, /* may be null */
bool relax, bool reuseport_ok)
{
bool reuseport_cb_ok;
struct sock_reuseport *reuseport_cb;
kuid_t uid = sock_i_uid((struct sock *)sk);
rcu_read_lock();
reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
rcu_read_unlock();
/*
* Unlike other sk lookup places we do not check
* for sk_net here, since _all_ the socks listed
	 * in the tb->owners and tb2->owners lists belong
* to the same net - the one this bucket belongs to.
*/
if (!inet_use_bhash2_on_bind(sk)) {
struct sock *sk2;
sk_for_each_bound(sk2, &tb->owners)
if (inet_bind_conflict(sk, sk2, uid, relax,
reuseport_cb_ok, reuseport_ok) &&
inet_rcv_saddr_equal(sk, sk2, true))
return true;
return false;
}
/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
* ipv4) should have been checked already. We need to do these two
* checks separately because their spinlocks have to be acquired/released
* independently of each other, to prevent possible deadlocks
*/
return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
reuseport_ok);
}
/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
* INADDR_ANY (if ipv4) socket.
*
* Caller must hold bhash hashbucket lock with local bh disabled, to protect
* against concurrent binds on the port for addr any
*/
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
bool relax, bool reuseport_ok)
{
kuid_t uid = sock_i_uid((struct sock *)sk);
const struct net *net = sock_net(sk);
struct sock_reuseport *reuseport_cb;
struct inet_bind_hashbucket *head2;
struct inet_bind2_bucket *tb2;
bool reuseport_cb_ok;
rcu_read_lock();
reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
rcu_read_unlock();
head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);
spin_lock(&head2->lock);
inet_bind_bucket_for_each(tb2, &head2->chain)
if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
break;
if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
reuseport_ok)) {
spin_unlock(&head2->lock);
return true;
}
spin_unlock(&head2->lock);
return false;
}
/*
* Find an open port number for the socket. Returns with the
* inet_bind_hashbucket locks held if successful.
*/
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
struct inet_bind2_bucket **tb2_ret,
struct inet_bind_hashbucket **head2_ret, int *port_ret)
{
struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
int i, low, high, attempt_half, port, l3mdev;
struct inet_bind_hashbucket *head, *head2;
struct net *net = sock_net(sk);
struct inet_bind2_bucket *tb2;
struct inet_bind_bucket *tb;
u32 remaining, offset;
bool relax = false;
l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
inet_sk_get_local_port_range(sk, &low, &high);
high++; /* [32768, 60999] -> [32768, 61000[ */
if (high - low < 4)
attempt_half = 0;
if (attempt_half) {
int half = low + (((high - low) >> 2) << 1);
if (attempt_half == 1)
high = half;
else
low = half;
}
remaining = high - low;
if (likely(remaining > 1))
remaining &= ~1U;
offset = get_random_u32_below(remaining);
/* __inet_hash_connect() favors ports having @low parity
* We do the opposite to not pollute connect() users.
*/
offset |= 1U;
other_parity_scan:
port = low + offset;
for (i = 0; i < remaining; i += 2, port += 2) {
if (unlikely(port >= high))
port -= remaining;
if (inet_is_local_reserved_port(net, port))
continue;
head = &hinfo->bhash[inet_bhashfn(net, port,
hinfo->bhash_size)];
spin_lock_bh(&head->lock);
if (inet_use_bhash2_on_bind(sk)) {
if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
goto next_port;
}
head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
spin_lock(&head2->lock);
tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
inet_bind_bucket_for_each(tb, &head->chain)
if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
if (!inet_csk_bind_conflict(sk, tb, tb2,
relax, false))
goto success;
spin_unlock(&head2->lock);
goto next_port;
}
tb = NULL;
goto success;
next_port:
spin_unlock_bh(&head->lock);
cond_resched();
}
offset--;
if (!(offset & 1))
goto other_parity_scan;
if (attempt_half == 1) {
/* OK we now try the upper half of the range */
attempt_half = 2;
goto other_half_scan;
}
if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
/* We still have a chance to connect to different destinations */
relax = true;
goto ports_exhausted;
}
return NULL;
success:
*port_ret = port;
*tb_ret = tb;
*tb2_ret = tb2;
*head2_ret = head2;
return head;
}
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
struct sock *sk)
{
kuid_t uid = sock_i_uid(sk);
if (tb->fastreuseport <= 0)
return 0;
if (!sk->sk_reuseport)
return 0;
if (rcu_access_pointer(sk->sk_reuseport_cb))
return 0;
if (!uid_eq(tb->fastuid, uid))
return 0;
/* We only need to check the rcv_saddr if this tb was once marked
* without fastreuseport and then was reset, as we can only know that
* the fast_*rcv_saddr doesn't have any conflicts with the socks on the
* owners list.
*/
if (tb->fastreuseport == FASTREUSEPORT_ANY)
return 1;
#if IS_ENABLED(CONFIG_IPV6)
if (tb->fast_sk_family == AF_INET6)
return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
inet6_rcv_saddr(sk),
tb->fast_rcv_saddr,
sk->sk_rcv_saddr,
tb->fast_ipv6_only,
ipv6_only_sock(sk), true, false);
#endif
return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
ipv6_only_sock(sk), true, false);
}
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
struct sock *sk)
{
kuid_t uid = sock_i_uid(sk);
bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
if (hlist_empty(&tb->owners)) {
tb->fastreuse = reuse;
if (sk->sk_reuseport) {
tb->fastreuseport = FASTREUSEPORT_ANY;
tb->fastuid = uid;
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
} else {
tb->fastreuseport = 0;
}
} else {
if (!reuse)
tb->fastreuse = 0;
if (sk->sk_reuseport) {
/* We didn't match or we don't have fastreuseport set on
* the tb, but we have sk_reuseport set on this socket
* and we know that there are no bind conflicts with
* this socket in this tb, so reset our tb's reuseport
* settings so that any subsequent sockets that match
* our current socket will be put on the fast path.
*
* If we reset we need to set FASTREUSEPORT_STRICT so we
* do extra checking for all subsequent sk_reuseport
* socks.
*/
if (!sk_reuseport_match(tb, sk)) {
tb->fastreuseport = FASTREUSEPORT_STRICT;
tb->fastuid = uid;
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
}
} else {
tb->fastreuseport = 0;
}
}
}
/* Obtain a reference to a local port for the given sock,
* if snum is zero it means select any available local port.
* We try to allocate an odd port (and leave even ports for connect())
*/
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
bool found_port = false, check_bind_conflict = true;
bool bhash_created = false, bhash2_created = false;
int ret = -EADDRINUSE, port = snum, l3mdev;
struct inet_bind_hashbucket *head, *head2;
struct inet_bind2_bucket *tb2 = NULL;
struct inet_bind_bucket *tb = NULL;
bool head2_lock_acquired = false;
struct net *net = sock_net(sk);
l3mdev = inet_sk_bound_l3mdev(sk);
if (!port) {
head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
if (!head)
return ret;
head2_lock_acquired = true;
if (tb && tb2)
goto success;
found_port = true;
} else {
head = &hinfo->bhash[inet_bhashfn(net, port,
hinfo->bhash_size)];
spin_lock_bh(&head->lock);
inet_bind_bucket_for_each(tb, &head->chain)
if (inet_bind_bucket_match(tb, net, port, l3mdev))
break;
}
if (!tb) {
tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
head, port, l3mdev);
if (!tb)
goto fail_unlock;
bhash_created = true;
}
if (!found_port) {
if (!hlist_empty(&tb->owners)) {
if (sk->sk_reuse == SK_FORCE_REUSE ||
(tb->fastreuse > 0 && reuse) ||
sk_reuseport_match(tb, sk))
check_bind_conflict = false;
}
if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
goto fail_unlock;
}
head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
spin_lock(&head2->lock);
head2_lock_acquired = true;
tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
}
if (!tb2) {
tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
net, head2, port, l3mdev, sk);
if (!tb2)
goto fail_unlock;
bhash2_created = true;
}
if (!found_port && check_bind_conflict) {
if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
goto fail_unlock;
}
success:
inet_csk_update_fastreuse(tb, sk);
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, tb2, port);
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
ret = 0;
fail_unlock:
if (ret) {
if (bhash_created)
inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
if (bhash2_created)
inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
tb2);
}
if (head2_lock_acquired)
spin_unlock(&head2->lock);
spin_unlock_bh(&head->lock);
return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
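/*
 * Editorial illustration (not part of the kernel source): binding with
 * port 0 from user space exercises the "snum == 0" branch of
 * inet_csk_get_port() above, where inet_csk_find_open_port() picks an
 * available port from the local range.  getsockname() then reveals the
 * kernel's choice.  The function name is hypothetical and error handling
 * is intentionally minimal.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int bind_ephemeral_tcp(unsigned short *chosen)
{
	struct sockaddr_in addr;
	socklen_t len = sizeof(addr);
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = 0;	/* let inet_csk_get_port() choose */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    getsockname(fd, (struct sockaddr *)&addr, &len) < 0) {
		close(fd);
		return -1;
	}
	*chosen = ntohs(addr.sin_port);
	return fd;
}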
/*
* Wait for an incoming connection, avoid race conditions. This must be called
* with the socket locked.
*/
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
struct inet_connection_sock *icsk = inet_csk(sk);
DEFINE_WAIT(wait);
int err;
/*
* True wake-one mechanism for incoming connections: only
* one process gets woken up, not the 'whole herd'.
* Since we do not 'race & poll' for established sockets
* anymore, the common case will execute the loop only once.
*
* Subtle issue: "add_wait_queue_exclusive()" will be added
* after any current non-exclusive waiters, and we know that
* it will always _stay_ after any new non-exclusive waiters
* because all non-exclusive waiters are added at the
* beginning of the wait-queue. As such, it's ok to "drop"
* our exclusiveness temporarily when we get woken up without
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
if (reqsk_queue_empty(&icsk->icsk_accept_queue))
timeo = schedule_timeout(timeo);
sched_annotate_sleep();
lock_sock(sk);
err = 0;
if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
break;
err = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
break;
err = -EAGAIN;
if (!timeo)
break;
}
finish_wait(sk_sleep(sk), &wait);
return err;
}
/*
* This will accept the next outstanding connection.
*/
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
struct request_sock *req;
struct sock *newsk;
int error;
lock_sock(sk);
/* We need to make sure that this socket is listening,
* and that it has something pending.
*/
error = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
goto out_err;
/* Find already established connection */
if (reqsk_queue_empty(queue)) {
long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
/* If this is a non-blocking socket, don't sleep */
error = -EAGAIN;
if (!timeo)
goto out_err;
error = inet_csk_wait_for_connect(sk, timeo);
if (error)
goto out_err;
}
req = reqsk_queue_remove(queue, sk);
newsk = req->sk;
if (sk->sk_protocol == IPPROTO_TCP &&
tcp_rsk(req)->tfo_listener) {
spin_lock_bh(&queue->fastopenq.lock);
if (tcp_rsk(req)->tfo_listener) {
/* We are still waiting for the final ACK from 3WHS
* so can't free req now. Instead, we set req->sk to
* NULL to signify that the child socket is taken
* so reqsk_fastopen_remove() will free the req
* when 3WHS finishes (or is aborted).
*/
req->sk = NULL;
req = NULL;
}
spin_unlock_bh(&queue->fastopenq.lock);
}
out:
release_sock(sk);
if (newsk && mem_cgroup_sockets_enabled) {
int amt = 0;
/* atomically get the memory usage, set and charge the
* newsk->sk_memcg.
*/
lock_sock(newsk);
mem_cgroup_sk_alloc(newsk);
if (newsk->sk_memcg) {
/* The socket has not been accepted yet, no need
* to look at newsk->sk_wmem_queued.
*/
amt = sk_mem_pages(newsk->sk_forward_alloc +
atomic_read(&newsk->sk_rmem_alloc));
}
if (amt)
mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
GFP_KERNEL | __GFP_NOFAIL);
release_sock(newsk);
}
if (req)
reqsk_put(req);
return newsk;
out_err:
newsk = NULL;
req = NULL;
*err = error;
goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
void (*retransmit_handler)(struct timer_list *t),
void (*delack_handler)(struct timer_list *t),
void (*keepalive_handler)(struct timer_list *t))
{
struct inet_connection_sock *icsk = inet_csk(sk);
timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
timer_setup(&sk->sk_timer, keepalive_handler, 0);
icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
void inet_csk_clear_xmit_timers(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_pending = icsk->icsk_ack.pending = 0;
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk_stop_timer(sk, &icsk->icsk_delack_timer);
sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
struct dst_entry *inet_csk_route_req(const struct sock *sk,
struct flowi4 *fl4,
const struct request_sock *req)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct net *net = read_pnet(&ireq->ireq_net);
struct ip_options_rcu *opt;
struct rtable *rt;
rcu_read_lock();
opt = rcu_dereference(ireq->ireq_opt);
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, ireq->ir_rmt_port,
htons(ireq->ir_num), sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
goto route_err;
rcu_read_unlock();
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
rcu_read_unlock();
__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
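/* Route the newly created child socket towards the peer of @req, storing
 * the flow in the child's inet cork so it can be reused later.
 */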
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct sock *newsk,
const struct request_sock *req)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct net *net = read_pnet(&ireq->ireq_net);
struct inet_sock *newinet = inet_sk(newsk);
struct ip_options_rcu *opt;
struct flowi4 *fl4;
struct rtable *rt;
opt = rcu_dereference(ireq->ireq_opt);
fl4 = &newinet->cork.fl.u.ip4;
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, ireq->ir_rmt_port,
htons(ireq->ir_num), sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
goto route_err;
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
const int max_syn_ack_retries,
const u8 rskq_defer_accept,
int *expire, int *resend)
{
if (!rskq_defer_accept) {
*expire = req->num_timeout >= max_syn_ack_retries;
*resend = 1;
return;
}
*expire = req->num_timeout >= max_syn_ack_retries &&
(!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
/* Do not resend while waiting for data after ACK;
 * start resending at the end of the deferring period to give
 * a last chance for data or an ACK to create an established socket.
 */
*resend = !inet_rsk(req)->acked ||
req->num_timeout >= rskq_defer_accept - 1;
}
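/* Retransmit the SYN-ACK for @req and account the retransmission on success. */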
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
int err = req->rsk_ops->rtx_syn_ack(parent, req);
if (!err)
req->num_retrans++;
return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
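/* Clone @req so it can be migrated to the new listener @sk.  On allocation
 * failure the reference the caller took on @sk is dropped and NULL is
 * returned.
 */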
static struct request_sock *inet_reqsk_clone(struct request_sock *req,
struct sock *sk)
{
struct sock *req_sk, *nreq_sk;
struct request_sock *nreq;
nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
if (!nreq) {
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
sock_put(sk);
return NULL;
}
req_sk = req_to_sk(req);
nreq_sk = req_to_sk(nreq);
memcpy(nreq_sk, req_sk,
offsetof(struct sock, sk_dontcopy_begin));
memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
sk_node_init(&nreq_sk->sk_node);
nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
nreq->rsk_listener = sk;
/* We need not acquire fastopenq->lock
* because the child socket is locked in inet_csk_listen_stop().
*/
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
return nreq;
}
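/* Account a migrated request in the new listener's accept queue counters. */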
static void reqsk_queue_migrated(struct request_sock_queue *queue,
const struct request_sock *req)
{
if (req->num_timeout == 0)
atomic_inc(&queue->young);
atomic_inc(&queue->qlen);
}
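/* Clear the pointers that have been handed over to the other request so
 * they are not freed twice when this request is released.
 */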
static void reqsk_migrate_reset(struct request_sock *req)
{
req->saved_syn = NULL;
#if IS_ENABLED(CONFIG_IPV6)
inet_rsk(req)->ipv6_opt = NULL;
inet_rsk(req)->pktopts = NULL;
#else
inet_rsk(req)->ireq_opt = NULL;
#endif
}
/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
struct sock *sk = req_to_sk(req);
bool found = false;
if (sk_hashed(sk)) {
struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
spin_lock(lock);
found = __sk_nulls_del_node_init_rcu(sk);
spin_unlock(lock);
}
if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
reqsk_put(req);
return found;
}
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
bool unlinked = reqsk_queue_unlink(req);
if (unlinked) {
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
reqsk_put(req);
}
return unlinked;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
inet_csk_reqsk_queue_drop(sk, req);
reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
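/* SYN-ACK retransmission timer of a request sock.  If the listener has left
 * TCP_LISTEN, try to migrate the request to another reuseport listener;
 * otherwise decide whether to retransmit the SYN-ACK, rearm the timer or
 * drop the request.
 */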
static void reqsk_timer_handler(struct timer_list *t)
{
struct request_sock *req = from_timer(req, t, rsk_timer);
struct request_sock *nreq = NULL, *oreq = req;
struct sock *sk_listener = req->rsk_listener;
struct inet_connection_sock *icsk;
struct request_sock_queue *queue;
struct net *net;
int max_syn_ack_retries, qlen, expire = 0, resend = 0;
if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
struct sock *nsk;
nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
if (!nsk)
goto drop;
nreq = inet_reqsk_clone(req, nsk);
if (!nreq)
goto drop;
/* The new timer for the cloned req can decrease the refcount of 2
 * by calling inet_csk_reqsk_queue_drop_and_put(), so
 * hold one extra reference to prevent use-after-free and
 * call reqsk_put() just before return.
 */
refcount_set(&nreq->rsk_refcnt, 2 + 1);
timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
req = nreq;
sk_listener = nsk;
}
icsk = inet_csk(sk_listener);
net = sock_net(sk_listener);
max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
/* Normally all the openreqs are young and become mature
 * (i.e. converted to an established socket) before the first timeout.
 * If a SYN-ACK was not acknowledged for 1 second, it means
 * one of the following: the SYN-ACK was lost, the ACK was lost,
 * the RTT is high, or nobody planned to ACK (i.e. a synflood).
 * When the server is somewhat loaded, the queue is populated with old
 * open requests, reducing the effective size of the queue.
 * When the server is heavily loaded, the queue size drops to zero
 * after several minutes of work. That is not a synflood,
 * it is normal operation. The solution is to prune entries that
 * are too old, overriding the normal timeout, when the
 * situation becomes dangerous.
 *
 * Essentially, we reserve half of the room for young
 * embryos and abort old ones without pity if the old
 * ones are about to clog our table.
 */
queue = &icsk->icsk_accept_queue;
qlen = reqsk_queue_len(queue);
if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
int young = reqsk_queue_len_young(queue) << 1;
while (max_syn_ack_retries > 2) {
if (qlen < young)
break;
max_syn_ack_retries--;
young <<= 1;
}
}
syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
&expire, &resend);
req->rsk_ops->syn_ack_timeout(req);
if (!expire &&
(!resend ||
!inet_rtx_syn_ack(sk_listener, req) ||
inet_rsk(req)->acked)) {
if (req->num_timeout++ == 0)
atomic_dec(&queue->young);
mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));
if (!nreq)
return;
if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
/* delete timer */
inet_csk_reqsk_queue_drop(sk_listener, nreq);
goto no_ownership;
}
__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
reqsk_migrate_reset(oreq);
reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
reqsk_put(oreq);
reqsk_put(nreq);
return;
}
/* Even if we can clone the req, we may not need to retransmit any more
 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc.), or another
 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
 */
if (nreq) {
__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
no_ownership:
reqsk_migrate_reset(nreq);
reqsk_queue_removed(queue, nreq);
__reqsk_free(nreq);
}
drop:
inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
}
static void reqsk_queue_hash_req(struct request_sock *req,
unsigned long timeout)
{
timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
mod_timer(&req->rsk_timer, jiffies + timeout);
inet_ehash_insert(req_to_sk(req), NULL, NULL);
/* before letting lookups find us, make sure all req fields
* are committed to memory and refcnt initialized.
*/
smp_wmb();
refcount_set(&req->rsk_refcnt, 2 + 1);
}
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout)
{
reqsk_queue_hash_req(req, timeout);
inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
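/* Propagate the listener's upper layer protocol (ULP) to the child socket
 * when the ULP implements a clone operation.
 */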
static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
const gfp_t priority)
{
struct inet_connection_sock *icsk = inet_csk(newsk);
if (!icsk->icsk_ulp_ops)
return;
icsk->icsk_ulp_ops->clone(req, newsk, priority);
}
/**
* inet_csk_clone_lock - clone an inet socket, and lock its clone
* @sk: the socket to clone
* @req: request_sock
* @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
*
* Caller must unlock socket even in error path (bh_unlock_sock(newsk))
*/
struct sock *inet_csk_clone_lock(const struct sock *sk,
const struct request_sock *req,
const gfp_t priority)
{
struct sock *newsk = sk_clone_lock(sk, priority);
if (newsk) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
newsk->sk_wait_pending = 0;
inet_sk_set_state(newsk, TCP_SYN_RECV);
newicsk->icsk_bind_hash = NULL;
newicsk->icsk_bind2_hash = NULL;
inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
/* listeners have SOCK_RCU_FREE, not the children */
sock_reset_flag(newsk, SOCK_RCU_FREE);
inet_sk(newsk)->mc_list = NULL;
newsk->sk_mark = inet_rsk(req)->ir_mark;
atomic64_set(&newsk->sk_cookie,
atomic64_read(&inet_rsk(req)->ir_cookie));
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
newicsk->icsk_probes_out = 0;
newicsk->icsk_probes_tstamp = 0;
/* Deinitialize accept_queue to trap illegal accesses. */
memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
inet_clone_ulp(req, newsk, priority);
security_inet_csk_clone(newsk, req);
}
return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
/*
* At this point, there should be no process reference to this
* socket, and thus no user references at all. Therefore we
* can assume the socket waitqueue is inactive and nobody will
* try to jump onto it.
*/
void inet_csk_destroy_sock(struct sock *sk)
{
WARN_ON(sk->sk_state != TCP_CLOSE);
WARN_ON(!sock_flag(sk, SOCK_DEAD));
/* It cannot be in hash table! */
WARN_ON(!sk_unhashed(sk));
/* If inet_sk(sk)->inet_num is non-zero, it must be bound */
WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
sk->sk_prot->destroy(sk);
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
this_cpu_dec(*sk->sk_prot->orphan_count);
sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
/* This function allows forcing the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
__releases(&sk->sk_lock.slock)
{
/* sk_clone_lock locked the socket and set refcnt to 2 */
bh_unlock_sock(sk);
sock_put(sk);
inet_csk_prepare_for_destroy_sock(sk);
inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
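/* A socket with a ULP attached may only listen if the ULP knows how to
 * clone itself onto child sockets.
 */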
static int inet_ulp_can_listen(const struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
return -EINVAL;
return 0;
}
int inet_csk_listen_start(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
int err;
err = inet_ulp_can_listen(sk);
if (unlikely(err))
return err;
reqsk_queue_alloc(&icsk->icsk_accept_queue);
sk->sk_ack_backlog = 0;
inet_csk_delack_init(sk);
/* There is a race window here: we announce ourselves listening,
 * but this transition is not yet validated by get_port().
 * It is OK, because this socket enters the hash table only
 * after validation is complete.
 */
inet_sk_state_store(sk, TCP_LISTEN);
err = sk->sk_prot->get_port(sk, inet->inet_num);
if (!err) {
inet->inet_sport = htons(inet->inet_num);
sk_dst_reset(sk);
err = sk->sk_prot->hash(sk);
if (likely(!err))
return 0;
}
inet_sk_set_state(sk, TCP_CLOSE);
return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
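/* Disconnect and orphan a child socket that will never be accepted, then
 * destroy it.
 */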
static void inet_child_forget(struct sock *sk, struct request_sock *req,
struct sock *child)
{
sk->sk_prot->disconnect(child, O_NONBLOCK);
sock_orphan(child);
this_cpu_inc(*sk->sk_prot->orphan_count);
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
BUG_ON(sk != req->rsk_listener);
/* Paranoid, to prevent race condition if
* an inbound pkt destined for child is
* blocked by sock lock in tcp_v4_rcv().
* Also to satisfy an assertion in
* tcp_v4_destroy_sock().
*/
RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
}
inet_csk_destroy_sock(child);
}
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
struct request_sock *req,
struct sock *child)
{
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
spin_lock(&queue->rskq_lock);
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_child_forget(sk, req, child);
child = NULL;
} else {
req->sk = child;
req->dl_next = NULL;
if (queue->rskq_accept_head == NULL)
WRITE_ONCE(queue->rskq_accept_head, req);
else
queue->rskq_accept_tail->dl_next = req;
queue->rskq_accept_tail = req;
sk_acceptq_added(sk);
}
spin_unlock(&queue->rskq_lock);
return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
struct request_sock *req, bool own_req)
{
if (own_req) {
inet_csk_reqsk_queue_drop(req->rsk_listener, req);
reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
if (sk != req->rsk_listener) {
/* another listening sk has been selected,
* migrate the req to it.
*/
struct request_sock *nreq;
/* hold a refcnt for the nreq->rsk_listener
* which is assigned in inet_reqsk_clone()
*/
sock_hold(sk);
nreq = inet_reqsk_clone(req, sk);
if (!nreq) {
inet_child_forget(sk, req, child);
goto child_put;
}
refcount_set(&nreq->rsk_refcnt, 1);
if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
reqsk_migrate_reset(req);
reqsk_put(req);
return child;
}
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
reqsk_migrate_reset(nreq);
__reqsk_free(nreq);
} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
return child;
}
}
/* Too bad, another child took ownership of the request, undo. */
child_put:
bh_unlock_sock(child);
sock_put(child);
return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);
/*
* This routine closes sockets which have been at least partially
* opened, but not yet accepted.
*/
void inet_csk_listen_stop(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
struct request_sock *next, *req;
/* Following the specs, it would be better either to send a FIN
 * (and enter FIN-WAIT-1, i.e. a normal close)
 * or to send an active reset (abort).
 * Certainly, it is pretty dangerous during a synflood, but that is
 * a bad justification for our negligence 8)
 * To be honest, we are not able to implement either
 * of the variants now. --ANK
 */
while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
struct sock *child = req->sk, *nsk;
struct request_sock *nreq;
local_bh_disable();
bh_lock_sock(child);
WARN_ON(sock_owned_by_user(child));
sock_hold(child);
nsk = reuseport_migrate_sock(sk, child, NULL);
if (nsk) {
nreq = inet_reqsk_clone(req, nsk);
if (nreq) {
refcount_set(&nreq->rsk_refcnt, 1);
if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
__NET_INC_STATS(sock_net(nsk),
LINUX_MIB_TCPMIGRATEREQSUCCESS);
reqsk_migrate_reset(req);
} else {
__NET_INC_STATS(sock_net(nsk),
LINUX_MIB_TCPMIGRATEREQFAILURE);
reqsk_migrate_reset(nreq);
__reqsk_free(nreq);
}
/* inet_csk_reqsk_queue_add() has already
* called inet_child_forget() on failure case.
*/
goto skip_child_forget;
}
}
inet_child_forget(sk, req, child);
skip_child_forget:
reqsk_put(req);
bh_unlock_sock(child);
local_bh_enable();
sock_put(child);
cond_resched();
}
if (queue->fastopenq.rskq_rst_head) {
/* Free all the reqs queued in rskq_rst_head. */
spin_lock_bh(&queue->fastopenq.lock);
req = queue->fastopenq.rskq_rst_head;
queue->fastopenq.rskq_rst_head = NULL;
spin_unlock_bh(&queue->fastopenq.lock);
while (req != NULL) {
next = req->dl_next;
reqsk_put(req);
req = next;
}
}
WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
const struct inet_sock *inet = inet_sk(sk);
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = inet->inet_daddr;
sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
struct flowi4 *fl4;
struct rtable *rt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
fl4 = &fl->u.ip4;
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
if (IS_ERR(rt))
rt = NULL;
if (rt)
sk_setup_caps(sk, &rt->dst);
rcu_read_unlock();
return &rt->dst;
}
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
struct dst_entry *dst = __sk_dst_check(sk, 0);
struct inet_sock *inet = inet_sk(sk);
if (!dst) {
dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
if (!dst)
goto out;
}
dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
dst = __sk_dst_check(sk, 0);
if (!dst)
dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
| linux-master | net/ipv4/inet_connection_sock.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IP Payload Compression Protocol (IPComp) - RFC3173.
*
* Copyright (c) 2003 James Morris <[email protected]>
*
* Todo:
* - Tunable compression parameters.
* - Compression stats.
* - Adaptive compression.
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
#include <net/ipcomp.h>
#include <net/protocol.h>
#include <net/sock.h>
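/* ICMP error handler for IPComp: on a matching xfrm state, update the path
 * MTU for ICMP_FRAG_NEEDED or follow an ICMP redirect.
 */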
static int ipcomp4_err(struct sk_buff *skb, u32 info)
{
struct net *net = dev_net(skb->dev);
__be32 spi;
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
switch (icmp_hdr(skb)->type) {
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return 0;
break;
case ICMP_REDIRECT:
break;
default:
return 0;
}
spi = htonl(ntohs(ipch->cpi));
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
return 0;
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
ipv4_update_pmtu(skb, net, info, 0, IPPROTO_COMP);
else
ipv4_redirect(skb, net, 0, IPPROTO_COMP);
xfrm_state_put(x);
return 0;
}
/* We always hold one tunnel user reference to indicate a tunnel */
static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
{
struct net *net = xs_net(x);
struct xfrm_state *t;
t = xfrm_state_alloc(net);
if (!t)
goto out;
t->id.proto = IPPROTO_IPIP;
t->id.spi = x->props.saddr.a4;
t->id.daddr.a4 = x->id.daddr.a4;
memcpy(&t->sel, &x->sel, sizeof(t->sel));
t->props.family = AF_INET;
t->props.mode = x->props.mode;
t->props.saddr.a4 = x->props.saddr.a4;
t->props.flags = x->props.flags;
t->props.extra_flags = x->props.extra_flags;
memcpy(&t->mark, &x->mark, sizeof(t->mark));
t->if_id = x->if_id;
if (xfrm_init_state(t))
goto error;
atomic_set(&t->tunnel_users, 1);
out:
return t;
error:
t->km.state = XFRM_STATE_DEAD;
xfrm_state_put(t);
t = NULL;
goto out;
}
/*
* Must be protected by xfrm_cfg_mutex. State and tunnel user references are
* always incremented on success.
*/
static int ipcomp_tunnel_attach(struct xfrm_state *x)
{
struct net *net = xs_net(x);
int err = 0;
struct xfrm_state *t;
u32 mark = x->mark.v & x->mark.m;
t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr.a4,
x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
if (!t) {
t = ipcomp_tunnel_create(x);
if (!t) {
err = -EINVAL;
goto out;
}
xfrm_state_insert(t);
xfrm_state_hold(t);
}
x->tunnel = t;
atomic_inc(&t->tunnel_users);
out:
return err;
}
static int ipcomp4_init_state(struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
int err = -EINVAL;
x->props.header_len = 0;
switch (x->props.mode) {
case XFRM_MODE_TRANSPORT:
break;
case XFRM_MODE_TUNNEL:
x->props.header_len += sizeof(struct iphdr);
break;
default:
NL_SET_ERR_MSG(extack, "Unsupported XFRM mode for IPcomp");
goto out;
}
err = ipcomp_init_state(x, extack);
if (err)
goto out;
if (x->props.mode == XFRM_MODE_TUNNEL) {
err = ipcomp_tunnel_attach(x);
if (err) {
NL_SET_ERR_MSG(extack, "Kernel error: failed to initialize the associated state");
goto out;
}
}
err = 0;
out:
return err;
}
static int ipcomp4_rcv_cb(struct sk_buff *skb, int err)
{
return 0;
}
static const struct xfrm_type ipcomp_type = {
.owner = THIS_MODULE,
.proto = IPPROTO_COMP,
.init_state = ipcomp4_init_state,
.destructor = ipcomp_destroy,
.input = ipcomp_input,
.output = ipcomp_output
};
static struct xfrm4_protocol ipcomp4_protocol = {
.handler = xfrm4_rcv,
.input_handler = xfrm_input,
.cb_handler = ipcomp4_rcv_cb,
.err_handler = ipcomp4_err,
.priority = 0,
};
static int __init ipcomp4_init(void)
{
if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) {
pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (xfrm4_protocol_register(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ipcomp_type, AF_INET);
return -EAGAIN;
}
return 0;
}
static void __exit ipcomp4_fini(void)
{
if (xfrm4_protocol_deregister(&ipcomp4_protocol, IPPROTO_COMP) < 0)
pr_info("%s: can't remove protocol\n", __func__);
xfrm_unregister_type(&ipcomp_type, AF_INET);
}
module_init(ipcomp4_init);
module_exit(ipcomp4_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp/IPv4) - RFC3173");
MODULE_AUTHOR("James Morris <[email protected]>");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_COMP);
| linux-master | net/ipv4/ipcomp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
return hash_32((__force u32)key ^ (__force u32)remote,
IP_TNL_HASH_BITS);
}
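/* A tunnel configured with TUNNEL_KEY matches only packets carrying the same
 * key; a keyless tunnel matches only keyless packets.
 */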
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
__be16 flags, __be32 key)
{
if (p->i_flags & TUNNEL_KEY) {
if (flags & TUNNEL_KEY)
return key == p->i_key;
else
/* key expected, none present */
return false;
} else
return !(flags & TUNNEL_KEY);
}
/* Fallback tunnel: no source, no destination, no key, no options.
   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.
   All keyless packets, if not matched to a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for input.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
int link, __be16 flags,
__be32 remote, __be32 local,
__be32 key)
{
struct ip_tunnel *t, *cand = NULL;
struct hlist_head *head;
struct net_device *ndev;
unsigned int hash;
hash = ip_tunnel_hash(key, remote);
head = &itn->tunnels[hash];
hlist_for_each_entry_rcu(t, head, hash_node) {
if (local != t->parms.iph.saddr ||
remote != t->parms.iph.daddr ||
!(t->dev->flags & IFF_UP))
continue;
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
if (t->parms.link == link)
return t;
else
cand = t;
}
hlist_for_each_entry_rcu(t, head, hash_node) {
if (remote != t->parms.iph.daddr ||
t->parms.iph.saddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
if (t->parms.link == link)
return t;
else if (!cand)
cand = t;
}
hash = ip_tunnel_hash(key, 0);
head = &itn->tunnels[hash];
hlist_for_each_entry_rcu(t, head, hash_node) {
if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
(local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
continue;
if (!(t->dev->flags & IFF_UP))
continue;
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
if (t->parms.link == link)
return t;
else if (!cand)
cand = t;
}
hlist_for_each_entry_rcu(t, head, hash_node) {
if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
t->parms.iph.saddr != 0 ||
t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;
if (t->parms.link == link)
return t;
else if (!cand)
cand = t;
}
if (cand)
return cand;
t = rcu_dereference(itn->collect_md_tun);
if (t && t->dev->flags & IFF_UP)
return t;
ndev = READ_ONCE(itn->fb_tunnel_dev);
if (ndev && ndev->flags & IFF_UP)
return netdev_priv(ndev);
return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
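/* Select the hash bucket for a tunnel from its key and (unicast) remote
 * address.
 */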
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
struct ip_tunnel_parm *parms)
{
unsigned int h;
__be32 remote;
__be32 i_key = parms->i_key;
if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
remote = parms->iph.daddr;
else
remote = 0;
if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
i_key = 0;
h = ip_tunnel_hash(i_key, remote);
return &itn->tunnels[h];
}
static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
struct hlist_head *head = ip_bucket(itn, &t->parms);
if (t->collect_md)
rcu_assign_pointer(itn->collect_md_tun, t);
hlist_add_head_rcu(&t->hash_node, head);
}
static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
if (t->collect_md)
rcu_assign_pointer(itn->collect_md_tun, NULL);
hlist_del_init_rcu(&t->hash_node);
}
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
struct ip_tunnel_parm *parms,
int type)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
__be32 key = parms->i_key;
__be16 flags = parms->i_flags;
int link = parms->link;
struct ip_tunnel *t = NULL;
struct hlist_head *head = ip_bucket(itn, parms);
hlist_for_each_entry_rcu(t, head, hash_node) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
link == t->parms.link &&
type == t->dev->type &&
ip_tunnel_key_match(&t->parms, flags, key))
break;
}
return t;
}
static struct net_device *__ip_tunnel_create(struct net *net,
const struct rtnl_link_ops *ops,
struct ip_tunnel_parm *parms)
{
int err;
struct ip_tunnel *tunnel;
struct net_device *dev;
char name[IFNAMSIZ];
err = -E2BIG;
if (parms->name[0]) {
if (!dev_valid_name(parms->name))
goto failed;
strscpy(name, parms->name, IFNAMSIZ);
} else {
if (strlen(ops->kind) > (IFNAMSIZ - 3))
goto failed;
strcpy(name, ops->kind);
strcat(name, "%d");
}
ASSERT_RTNL();
dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
if (!dev) {
err = -ENOMEM;
goto failed;
}
dev_net_set(dev, net);
dev->rtnl_link_ops = ops;
tunnel = netdev_priv(dev);
tunnel->parms = *parms;
tunnel->net = net;
err = register_netdevice(dev);
if (err)
goto failed_free;
return dev;
failed_free:
free_netdev(dev);
failed:
return ERR_PTR(err);
}
static int ip_tunnel_bind_dev(struct net_device *dev)
{
struct net_device *tdev = NULL;
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *iph;
int hlen = LL_MAX_HEADER;
int mtu = ETH_DATA_LEN;
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
iph = &tunnel->parms.iph;
/* Guess output device to choose reasonable mtu and needed_headroom */
if (iph->daddr) {
struct flowi4 fl4;
struct rtable *rt;
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
RT_TOS(iph->tos), dev_net(dev),
tunnel->parms.link, tunnel->fwmark, 0, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
if (!IS_ERR(rt)) {
tdev = rt->dst.dev;
ip_rt_put(rt);
}
if (dev->type != ARPHRD_ETHER)
dev->flags |= IFF_POINTOPOINT;
dst_cache_reset(&tunnel->dst_cache);
}
if (!tdev && tunnel->parms.link)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev) {
hlen = tdev->hard_header_len + tdev->needed_headroom;
mtu = min(tdev->mtu, IP_MAX_MTU);
}
dev->needed_headroom = t_hlen + hlen;
mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0);
if (mtu < IPV4_MIN_MTU)
mtu = IPV4_MIN_MTU;
return mtu;
}
static struct ip_tunnel *ip_tunnel_create(struct net *net,
struct ip_tunnel_net *itn,
struct ip_tunnel_parm *parms)
{
struct ip_tunnel *nt;
struct net_device *dev;
int t_hlen;
int mtu;
int err;
dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms);
if (IS_ERR(dev))
return ERR_CAST(dev);
mtu = ip_tunnel_bind_dev(dev);
err = dev_set_mtu(dev, mtu);
if (err)
goto err_dev_set_mtu;
nt = netdev_priv(dev);
t_hlen = nt->hlen + sizeof(struct iphdr);
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP_MAX_MTU - t_hlen;
if (dev->type == ARPHRD_ETHER)
dev->max_mtu -= dev->hard_header_len;
ip_tunnel_add(itn, nt);
return nt;
err_dev_set_mtu:
unregister_netdevice(dev);
return ERR_PTR(err);
}
void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info)
{
const struct iphdr *iph = ip_hdr(skb);
const struct udphdr *udph;
if (iph->protocol != IPPROTO_UDP)
return;
udph = (struct udphdr *)((__u8 *)iph + (iph->ihl << 2));
info->encap.sport = udph->source;
info->encap.dport = udph->dest;
}
EXPORT_SYMBOL(ip_tunnel_md_udp_encap);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error)
{
const struct iphdr *iph = ip_hdr(skb);
int err;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
DEV_STATS_INC(tunnel->dev, multicast);
skb->pkt_type = PACKET_BROADCAST;
}
#endif
if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
DEV_STATS_INC(tunnel->dev, rx_crc_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop;
}
if (tunnel->parms.i_flags&TUNNEL_SEQ) {
if (!(tpi->flags&TUNNEL_SEQ) ||
(tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop;
}
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
&iph->saddr, iph->tos);
if (err > 1) {
DEV_STATS_INC(tunnel->dev, rx_frame_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop;
}
}
dev_sw_netstats_rx_add(tunnel->dev, skb->len);
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
if (tunnel->dev->type == ARPHRD_ETHER) {
skb->protocol = eth_type_trans(skb, tunnel->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
} else {
skb->dev = tunnel->dev;
}
if (tun_dst)
skb_dst_set(skb, (struct dst_entry *)tun_dst);
gro_cells_receive(&tunnel->gro_cells, skb);
return 0;
drop:
if (tun_dst)
dst_release((struct dst_entry *)tun_dst);
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
unsigned int num)
{
if (num >= MAX_IPTUN_ENCAP_OPS)
return -ERANGE;
return !cmpxchg((const struct ip_tunnel_encap_ops **)
&iptun_encaps[num],
NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
unsigned int num)
{
int ret;
if (num >= MAX_IPTUN_ENCAP_OPS)
return -ERANGE;
ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
&iptun_encaps[num],
ops, NULL) == ops) ? 0 : -1;
synchronize_net();
return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap)
{
int hlen;
memset(&t->encap, 0, sizeof(t->encap));
hlen = ip_encap_hlen(ipencap);
if (hlen < 0)
return hlen;
t->encap.type = ipencap->type;
t->encap.sport = ipencap->sport;
t->encap.dport = ipencap->dport;
t->encap.flags = ipencap->flags;
t->encap_hlen = hlen;
t->hlen = t->encap_hlen + t->tun_hlen;
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
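/* Check the packet against the path MTU of the tunnel route: refresh the
 * cached PMTU on the skb's dst and, when the inner packet does not fit and
 * must not be fragmented, send an ICMP/ICMPv6 "packet too big" error back
 * and return -E2BIG.
 */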
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
struct rtable *rt, __be16 df,
const struct iphdr *inner_iph,
int tunnel_hlen, __be32 dst, bool md)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int pkt_size;
int mtu;
tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
pkt_size = skb->len - tunnel_hlen;
pkt_size -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
if (df) {
mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
mtu -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
} else {
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
}
if (skb_valid_dst(skb))
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
if (!skb_is_gso(skb) &&
(inner_iph->frag_off & htons(IP_DF)) &&
mtu < pkt_size) {
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
return -E2BIG;
}
}
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6;
__be32 daddr;
rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
NULL;
daddr = md ? dst : tunnel->parms.iph.daddr;
if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
mtu >= IPV6_MIN_MTU) {
if ((daddr && !ipv4_is_multicast(daddr)) ||
rt6->rt6i_dst.plen == 128) {
rt6->rt6i_flags |= RTF_MODIFIED;
dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
}
}
if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
mtu < pkt_size) {
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
return -E2BIG;
}
}
#endif
return 0;
}
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
u8 proto, int tunnel_hlen)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
u32 headroom = sizeof(struct iphdr);
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
const struct iphdr *inner_iph;
struct rtable *rt = NULL;
struct flowi4 fl4;
__be16 df = 0;
u8 tos, ttl;
bool use_cache;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
ip_tunnel_info_af(tun_info) != AF_INET))
goto tx_error;
key = &tun_info->key;
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
tos = key->tos;
if (tos == 1) {
if (skb->protocol == htons(ETH_P_IP))
tos = inner_iph->tos;
else if (skb->protocol == htons(ETH_P_IPV6))
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
}
ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
dev_net(dev), 0, skb->mark, skb_get_hash(skb),
key->flow_flags);
if (!tunnel_hlen)
tunnel_hlen = ip_encap_hlen(&tun_info->encap);
if (ip_tunnel_encap(skb, &tun_info->encap, &proto, &fl4) < 0)
goto tx_error;
use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
if (use_cache)
rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
if (!rt) {
rt = ip_route_output_key(tunnel->net, &fl4);
if (IS_ERR(rt)) {
DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error;
}
if (use_cache)
dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
fl4.saddr);
}
if (rt->dst.dev == dev) {
ip_rt_put(rt);
DEV_STATS_INC(dev, collisions);
goto tx_error;
}
if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
df = htons(IP_DF);
if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
key->u.ipv4.dst, true)) {
ip_rt_put(rt);
goto tx_error;
}
tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
ttl = key->ttl;
if (ttl == 0) {
if (skb->protocol == htons(ETH_P_IP))
ttl = inner_iph->ttl;
else if (skb->protocol == htons(ETH_P_IPV6))
ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
else
ttl = ip4_dst_hoplimit(&rt->dst);
}
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
if (headroom > READ_ONCE(dev->needed_headroom))
WRITE_ONCE(dev->needed_headroom, headroom);
if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
ip_rt_put(rt);
goto tx_dropped;
}
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
tx_error:
DEV_STATS_INC(dev, tx_errors);
goto kfree;
tx_dropped:
DEV_STATS_INC(dev, tx_dropped);
kfree:
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_info *tun_info = NULL;
const struct iphdr *inner_iph;
unsigned int max_headroom; /* The extra header space needed */
struct rtable *rt = NULL; /* Route to the other host */
__be16 payload_protocol;
bool use_cache = false;
struct flowi4 fl4;
bool md = false;
bool connected;
u8 tos, ttl;
__be32 dst;
__be16 df;
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
payload_protocol = skb_protocol(skb, true);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
dst = tnl_params->daddr;
if (dst == 0) {
/* NBMA tunnel */
if (!skb_dst(skb)) {
DEV_STATS_INC(dev, tx_fifo_errors);
goto tx_error;
}
tun_info = skb_tunnel_info(skb);
if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
ip_tunnel_info_af(tun_info) == AF_INET &&
tun_info->key.u.ipv4.dst) {
dst = tun_info->key.u.ipv4.dst;
md = true;
connected = true;
} else if (payload_protocol == htons(ETH_P_IP)) {
rt = skb_rtable(skb);
dst = rt_nexthop(rt, inner_iph->daddr);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (payload_protocol == htons(ETH_P_IPV6)) {
const struct in6_addr *addr6;
struct neighbour *neigh;
bool do_tx_error_icmp;
int addr_type;
neigh = dst_neigh_lookup(skb_dst(skb),
&ipv6_hdr(skb)->daddr);
if (!neigh)
goto tx_error;
addr6 = (const struct in6_addr *)&neigh->primary_key;
addr_type = ipv6_addr_type(addr6);
if (addr_type == IPV6_ADDR_ANY) {
addr6 = &ipv6_hdr(skb)->daddr;
addr_type = ipv6_addr_type(addr6);
}
if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
do_tx_error_icmp = true;
else {
do_tx_error_icmp = false;
dst = addr6->s6_addr32[3];
}
neigh_release(neigh);
if (do_tx_error_icmp)
goto tx_error_icmp;
}
#endif
else
goto tx_error;
if (!md)
connected = false;
}
tos = tnl_params->tos;
if (tos & 0x1) {
tos &= ~0x1;
if (payload_protocol == htons(ETH_P_IP)) {
tos = inner_iph->tos;
connected = false;
} else if (payload_protocol == htons(ETH_P_IPV6)) {
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
connected = false;
}
}
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos),
dev_net(dev), tunnel->parms.link,
tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
goto tx_error;
if (connected && md) {
use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
if (use_cache)
rt = dst_cache_get_ip4(&tun_info->dst_cache,
&fl4.saddr);
} else {
rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache,
&fl4.saddr) : NULL;
}
if (!rt) {
rt = ip_route_output_key(tunnel->net, &fl4);
if (IS_ERR(rt)) {
DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error;
}
if (use_cache)
dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
fl4.saddr);
else if (!md && connected)
dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
fl4.saddr);
}
if (rt->dst.dev == dev) {
ip_rt_put(rt);
DEV_STATS_INC(dev, collisions);
goto tx_error;
}
df = tnl_params->frag_off;
if (payload_protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
df |= (inner_iph->frag_off & htons(IP_DF));
if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
ip_rt_put(rt);
goto tx_error;
}
if (tunnel->err_count > 0) {
if (time_before(jiffies,
tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
tunnel->err_count--;
dst_link_failure(skb);
} else
tunnel->err_count = 0;
}
tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
ttl = tnl_params->ttl;
if (ttl == 0) {
if (payload_protocol == htons(ETH_P_IP))
ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
else if (payload_protocol == htons(ETH_P_IPV6))
ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
else
ttl = ip4_dst_hoplimit(&rt->dst);
}
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
if (max_headroom > READ_ONCE(dev->needed_headroom))
WRITE_ONCE(dev->needed_headroom, max_headroom);
if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
ip_rt_put(rt);
DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb);
return;
}
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
dst_link_failure(skb);
#endif
tx_error:
DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
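/* Apply new parameters to an existing tunnel: re-hash it under the new
 * addresses and keys, refresh the device addresses and, when the underlying
 * link or fwmark changed, re-bind the tunnel and optionally update the MTU.
 */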
static void ip_tunnel_update(struct ip_tunnel_net *itn,
struct ip_tunnel *t,
struct net_device *dev,
struct ip_tunnel_parm *p,
bool set_mtu,
__u32 fwmark)
{
ip_tunnel_del(itn, t);
t->parms.iph.saddr = p->iph.saddr;
t->parms.iph.daddr = p->iph.daddr;
t->parms.i_key = p->i_key;
t->parms.o_key = p->o_key;
if (dev->type != ARPHRD_ETHER) {
__dev_addr_set(dev, &p->iph.saddr, 4);
memcpy(dev->broadcast, &p->iph.daddr, 4);
}
ip_tunnel_add(itn, t);
t->parms.iph.ttl = p->iph.ttl;
t->parms.iph.tos = p->iph.tos;
t->parms.iph.frag_off = p->iph.frag_off;
if (t->parms.link != p->link || t->fwmark != fwmark) {
int mtu;
t->parms.link = p->link;
t->fwmark = fwmark;
mtu = ip_tunnel_bind_dev(dev);
if (set_mtu)
dev->mtu = mtu;
}
dst_cache_reset(&t->dst_cache);
netdev_state_change(dev);
}
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
int err = 0;
struct ip_tunnel *t = netdev_priv(dev);
struct net *net = t->net;
struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);
switch (cmd) {
case SIOCGETTUNNEL:
if (dev == itn->fb_tunnel_dev) {
t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
if (!t)
t = netdev_priv(dev);
}
memcpy(p, &t->parms, sizeof(*p));
break;
case SIOCADDTUNNEL:
case SIOCCHGTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
if (p->iph.ttl)
p->iph.frag_off |= htons(IP_DF);
if (!(p->i_flags & VTI_ISVTI)) {
if (!(p->i_flags & TUNNEL_KEY))
p->i_key = 0;
if (!(p->o_flags & TUNNEL_KEY))
p->o_key = 0;
}
t = ip_tunnel_find(itn, p, itn->type);
if (cmd == SIOCADDTUNNEL) {
if (!t) {
t = ip_tunnel_create(net, itn, p);
err = PTR_ERR_OR_ZERO(t);
break;
}
err = -EEXIST;
break;
}
if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
if (t) {
if (t->dev != dev) {
err = -EEXIST;
break;
}
} else {
unsigned int nflags = 0;
if (ipv4_is_multicast(p->iph.daddr))
nflags = IFF_BROADCAST;
else if (p->iph.daddr)
nflags = IFF_POINTOPOINT;
if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
err = -EINVAL;
break;
}
t = netdev_priv(dev);
}
}
if (t) {
err = 0;
ip_tunnel_update(itn, t, dev, p, true, 0);
} else {
err = -ENOENT;
}
break;
case SIOCDELTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
if (dev == itn->fb_tunnel_dev) {
err = -ENOENT;
t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
if (!t)
goto done;
err = -EPERM;
if (t == netdev_priv(itn->fb_tunnel_dev))
goto done;
dev = t->dev;
}
unregister_netdevice(dev);
err = 0;
break;
default:
err = -EINVAL;
}
done:
return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ctl);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
struct ip_tunnel_parm p;
int err;
if (copy_from_user(&p, data, sizeof(p)))
return -EFAULT;
err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, cmd);
if (!err && copy_to_user(data, &p, sizeof(p)))
return -EFAULT;
return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_siocdevprivate);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
int max_mtu = IP_MAX_MTU - t_hlen;
if (dev->type == ARPHRD_ETHER)
max_mtu -= dev->hard_header_len;
if (new_mtu < ETH_MIN_MTU)
return -EINVAL;
if (new_mtu > max_mtu) {
if (strict)
return -EINVAL;
new_mtu = max_mtu;
}
dev->mtu = new_mtu;
return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
static void ip_tunnel_dev_free(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
gro_cells_destroy(&tunnel->gro_cells);
dst_cache_destroy(&tunnel->dst_cache);
free_percpu(dev->tstats);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_net *itn;
itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
if (itn->fb_tunnel_dev != dev) {
ip_tunnel_del(itn, netdev_priv(dev));
unregister_netdevice_queue(dev, head);
}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);
int ip_tunnel_get_iflink(const struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname)
{
struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
struct ip_tunnel_parm parms;
unsigned int i;
itn->rtnl_link_ops = ops;
for (i = 0; i < IP_TNL_HASH_SIZE; i++)
INIT_HLIST_HEAD(&itn->tunnels[i]);
if (!ops || !net_has_fallback_tunnels(net)) {
struct ip_tunnel_net *it_init_net;
it_init_net = net_generic(&init_net, ip_tnl_net_id);
itn->type = it_init_net->type;
itn->fb_tunnel_dev = NULL;
return 0;
}
memset(&parms, 0, sizeof(parms));
if (devname)
strscpy(parms.name, devname, IFNAMSIZ);
rtnl_lock();
itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
/* FB netdevice is special: we have one, and only one per netns.
 * Allowing it to be moved to another netns is clearly unsafe.
 */
if (!IS_ERR(itn->fb_tunnel_dev)) {
itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
itn->type = itn->fb_tunnel_dev->type;
}
rtnl_unlock();
return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
struct list_head *head,
struct rtnl_link_ops *ops)
{
struct net_device *dev, *aux;
int h;
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == ops)
unregister_netdevice_queue(dev, head);
for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
struct ip_tunnel *t;
struct hlist_node *n;
struct hlist_head *thead = &itn->tunnels[h];
hlist_for_each_entry_safe(t, n, thead, hash_node)
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev, head);
}
}
void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
struct rtnl_link_ops *ops)
{
struct ip_tunnel_net *itn;
struct net *net;
LIST_HEAD(list);
rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
itn = net_generic(net, id);
ip_tunnel_destroy(net, itn, &list, ops);
}
unregister_netdevice_many(&list);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark)
{
struct ip_tunnel *nt;
struct net *net = dev_net(dev);
struct ip_tunnel_net *itn;
int mtu;
int err;
nt = netdev_priv(dev);
itn = net_generic(net, nt->ip_tnl_net_id);
if (nt->collect_md) {
if (rtnl_dereference(itn->collect_md_tun))
return -EEXIST;
} else {
if (ip_tunnel_find(itn, p, dev->type))
return -EEXIST;
}
nt->net = net;
nt->parms = *p;
nt->fwmark = fwmark;
err = register_netdevice(dev);
if (err)
goto err_register_netdevice;
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
mtu = ip_tunnel_bind_dev(dev);
if (tb[IFLA_MTU]) {
unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
if (dev->type == ARPHRD_ETHER)
max -= dev->hard_header_len;
mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
}
err = dev_set_mtu(dev, mtu);
if (err)
goto err_dev_set_mtu;
ip_tunnel_add(itn, nt);
return 0;
err_dev_set_mtu:
unregister_netdevice(dev);
err_register_netdevice:
return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark)
{
struct ip_tunnel *t;
struct ip_tunnel *tunnel = netdev_priv(dev);
struct net *net = tunnel->net;
struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
if (dev == itn->fb_tunnel_dev)
return -EINVAL;
t = ip_tunnel_find(itn, p, dev->type);
if (t) {
if (t->dev != dev)
return -EEXIST;
} else {
t = tunnel;
if (dev->type != ARPHRD_ETHER) {
unsigned int nflags = 0;
if (ipv4_is_multicast(p->iph.daddr))
nflags = IFF_BROADCAST;
else if (p->iph.daddr)
nflags = IFF_POINTOPOINT;
if ((dev->flags ^ nflags) &
(IFF_POINTOPOINT | IFF_BROADCAST))
return -EINVAL;
}
}
ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
int ip_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
int err;
dev->needs_free_netdev = true;
dev->priv_destructor = ip_tunnel_dev_free;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (err) {
free_percpu(dev->tstats);
return err;
}
err = gro_cells_init(&tunnel->gro_cells, dev);
if (err) {
dst_cache_destroy(&tunnel->dst_cache);
free_percpu(dev->tstats);
return err;
}
tunnel->dev = dev;
tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
iph->version = 4;
iph->ihl = 5;
if (tunnel->collect_md)
netif_keep_dst(dev);
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct net *net = tunnel->net;
struct ip_tunnel_net *itn;
itn = net_generic(net, tunnel->ip_tnl_net_id);
ip_tunnel_del(itn, netdev_priv(dev));
if (itn->fb_tunnel_dev == dev)
WRITE_ONCE(itn->fb_tunnel_dev, NULL);
dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
/* Do the least required initialization; the rest of the init is done in the tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);
MODULE_LICENSE("GPL");
| linux-master | net/ipv4/ip_tunnel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <[email protected]>
* Mark Evans, <[email protected]>
* Corey Minyard <[email protected]>
* Florian La Roche, <[email protected]>
* Charles Hedrick, <[email protected]>
* Linus Torvalds, <[email protected]>
* Alan Cox, <[email protected]>
* Matthew Dillon, <[email protected]>
* Arnt Gulbrandsen, <[email protected]>
* Jorge Cwik, <[email protected]>
*/
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
if (seq == s_win)
return true;
if (after(end_seq, s_win) && before(seq, e_win))
return true;
return seq == e_win && seq == end_seq;
}
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
const struct sk_buff *skb, int mib_idx)
{
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
&tcptw->tw_last_oow_ack_time)) {
/* Send ACK. Note, we do not put the bucket,
* it will be released by caller.
*/
return TCP_TW_ACK;
}
/* We are rate-limiting, so just release the tw sock and drop skb. */
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 * gracefully, when one of the ends sits in LAST-ACK or CLOSING,
 * retransmitting its FIN (and, probably, a tail of data) and one or more
 * of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 * lifetime in the internet, which leads to the wrong conclusion that
 * it is set to catch "old duplicate segments" wandering out of their path.
 * That is not quite correct. This timeout is calculated so that it exceeds
 * the maximal retransmission timeout by enough to allow the loss of one
 * (or more) segments sent by the peer and of our ACKs. This time may be
 * calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 * has finally closed and we are allowed to kill the TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 * Well, certainly it is pure paranoia, but if we load TIME-WAIT
 * with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates
 * (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
*
* The algorithm below is based on FORMAL INTERPRETATION of RFCs.
* When you compare it to RFCs, please, read section SEGMENT ARRIVES
* from the very beginning.
*
* NOTE. With recycling (and later with fin-wait-2) TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc. --ANK
*
* We don't need to initialize tmp_out.sack_ok as we don't use the results
*/
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
const struct tcphdr *th)
{
struct tcp_options_received tmp_opt;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
bool paws_reject = false;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
if (tmp_opt.saw_tstamp) {
if (tmp_opt.rcv_tsecr)
tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
tmp_opt.ts_recent = tcptw->tw_ts_recent;
tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
if (tw->tw_substate == TCP_FIN_WAIT2) {
/* Just repeat all the checks of tcp_rcv_state_process() */
/* Out of window, send ACK */
if (paws_reject ||
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
tcptw->tw_rcv_nxt,
tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
return tcp_timewait_check_oow_rate_limit(
tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
if (th->rst)
goto kill;
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
return TCP_TW_RST;
/* Dup ACK? */
if (!th->ack ||
!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
/* New data or FIN. If new data arrive after half-duplex close,
* reset.
*/
if (!th->fin ||
TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
return TCP_TW_RST;
/* FIN arrived, enter true time-wait state. */
tw->tw_substate = TCP_TIME_WAIT;
tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent_stamp = ktime_get_seconds();
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
}
inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
return TCP_TW_ACK;
}
/*
* Now real TIME-WAIT state.
*
* RFC 1122:
* "When a connection is [...] on TIME-WAIT state [...]
* [a TCP] MAY accept a new SYN from the remote TCP to
* reopen the connection directly, if it:
*
* (1) assigns its initial sequence number for the new
* connection to be larger than the largest sequence
* number it used on the previous connection incarnation,
* and
*
* (2) returns to TIME-WAIT state if the SYN turns out
* to be an old duplicate".
*/
if (!paws_reject &&
(TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
(TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ack. */
if (th->rst) {
/* This is TIME_WAIT assassination, in two flavors.
* Oh well... nobody has a sufficient solution to this
* protocol bug yet.
*/
if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
inet_twsk_deschedule_put(tw);
return TCP_TW_SUCCESS;
}
} else {
inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
}
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
tcptw->tw_ts_recent_stamp = ktime_get_seconds();
}
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
/* Out of window segment.
All the segments are ACKed immediately.
	   The only exception is a new SYN. We accept it if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check - that it carries a
	   newer sequence number - only works at rates below ~40Mbit/sec.
	   However, if PAWS works, it is reliable, and we may even relax
	   the silly sequence-space cutoff.
	   RED-PEN: we violate the main RFC requirement: if this SYN turns out
	   to be an old duplicate (i.e. we receive an RST in reply to our SYN-ACK),
	   we must return the socket to the time-wait state. That is not good,
	   but not fatal yet.
*/
if (th->syn && !th->rst && !th->ack && !paws_reject &&
(after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
(tmp_opt.saw_tstamp &&
(s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
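		/* Descriptive note (added): choose an ISN above anything the old
		 * incarnation could have used: its snd_nxt plus the maximum
		 * unscaled window, plus slack.
		 */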
u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
if (isn == 0)
isn++;
TCP_SKB_CB(skb)->tcp_tw_isn = isn;
return TCP_TW_SYN;
}
if (paws_reject)
__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
if (!th->rst) {
/* In this case we must reset the TIMEWAIT timer.
*
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
*/
if (paws_reject || th->ack)
inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
return tcp_timewait_check_oow_rate_limit(
tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
}
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
/*
* The timewait bucket does not have the key DB from the
* sock structure. We just make a quick copy of the
* md5 key being used (if indeed we are using one)
* so the timewait ack generating code has the key.
*/
tcptw->tw_md5_key = NULL;
if (!static_branch_unlikely(&tcp_md5_needed.key))
return;
key = tp->af_specific->md5_lookup(sk, sk);
if (key) {
tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
if (!tcptw->tw_md5_key)
return;
if (!tcp_alloc_md5sig_pool())
goto out_free;
if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
goto out_free;
}
return;
out_free:
WARN_ON_ONCE(1);
kfree(tcptw->tw_md5_key);
tcptw->tw_md5_key = NULL;
#endif
}
/*
* Move a socket to time-wait or dead fin-wait-2 state.
*/
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct inet_timewait_sock *tw;
tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
if (tw) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
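		/* Descriptive note (added): (rto << 2) - (rto >> 1) == 3.5 * RTO,
		 * used below as the minimum timeout for the FIN-WAIT-2 /
		 * TIME-WAIT timer.
		 */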
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
tw->tw_mark = sk->sk_mark;
tw->tw_priority = sk->sk_priority;
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt;
tcptw->tw_snd_nxt = tp->snd_nxt;
tcptw->tw_rcv_wnd = tcp_receive_window(tp);
tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
tcptw->tw_ts_offset = tp->tsoffset;
tcptw->tw_last_oow_ack_time = 0;
tcptw->tw_tx_delay = tp->tcp_tx_delay;
tw->tw_txhash = sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
tw->tw_v6_daddr = sk->sk_v6_daddr;
tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
tw->tw_tclass = np->tclass;
tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
tw->tw_ipv6only = sk->sk_ipv6only;
}
#endif
tcp_time_wait_init(sk, tcptw);
/* Get the TIME_WAIT timeout firing. */
if (timeo < rto)
timeo = rto;
if (state == TCP_TIME_WAIT)
timeo = TCP_TIMEWAIT_LEN;
/* tw_timer is pinned, so we need to make sure BH are disabled
* in following section, otherwise timer handler could run before
* we complete the initialization.
*/
local_bh_disable();
inet_twsk_schedule(tw, timeo);
/* Linkage updates.
* Note that access to tw after this point is illegal.
*/
inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
* non-graceful socket closings.
*/
NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
}
tcp_update_metrics(sk);
tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
if (static_branch_unlikely(&tcp_md5_needed.key)) {
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
if (twsk->tw_md5_key) {
kfree_rcu(twsk->tw_md5_key, rcu);
static_branch_slow_dec_deferred(&tcp_md5_needed);
}
}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
bool purged_once = false;
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list) {
if (net->ipv4.tcp_death_row.hashinfo->pernet) {
/* Even if tw_refcount == 1, we must clean up kernel reqsk */
inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
} else if (!purged_once) {
/* The last refcount is decremented in tcp_sk_exit_batch() */
if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
continue;
inet_twsk_purge(&tcp_hashinfo, family);
purged_once = true;
}
}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);
/* Warning : This function is called without sk_listener being locked.
* Be sure to read socket fields once, as their value could change under us.
*/
void tcp_openreq_init_rwin(struct request_sock *req,
const struct sock *sk_listener,
const struct dst_entry *dst)
{
struct inet_request_sock *ireq = inet_rsk(req);
const struct tcp_sock *tp = tcp_sk(sk_listener);
int full_space = tcp_full_space(sk_listener);
u32 window_clamp;
__u8 rcv_wscale;
u32 rcv_wnd;
int mss;
mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
window_clamp = READ_ONCE(tp->window_clamp);
/* Set this up on the first call only */
req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
	/* limit the window selection if the user enforces a smaller rx buffer */
if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
(req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
req->rsk_window_clamp = full_space;
rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
if (rcv_wnd == 0)
rcv_wnd = dst_metric(dst, RTAX_INITRWND);
else if (full_space < rcv_wnd * mss)
full_space = rcv_wnd * mss;
/* tcp_full_space because it is guaranteed to be the first packet */
tcp_select_initial_window(sk_listener, full_space,
mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
&req->rsk_rcv_wnd,
&req->rsk_window_clamp,
ireq->wscale_ok,
&rcv_wscale,
rcv_wnd);
ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
const struct request_sock *req)
{
tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
struct inet_connection_sock *icsk = inet_csk(sk);
u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
bool ca_got_dst = false;
if (ca_key != TCP_CA_UNSPEC) {
const struct tcp_congestion_ops *ca;
rcu_read_lock();
ca = tcp_ca_find_key(ca_key);
if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
icsk->icsk_ca_ops = ca;
ca_got_dst = true;
}
rcu_read_unlock();
}
/* If no valid choice made yet, assign current system default ca. */
if (!ca_got_dst &&
(!icsk->icsk_ca_setsockopt ||
!bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
tcp_assign_congestion_control(sk);
tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
struct request_sock *req,
struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
struct inet_request_sock *ireq;
if (static_branch_unlikely(&tcp_have_smc)) {
ireq = inet_rsk(req);
if (oldtp->syn_smc && !ireq->smc_ok)
newtp->syn_smc = 0;
}
#endif
}
/* This is not only more efficient than what we used to do, it eliminates
* a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
*
 * Actually, we could avoid lots of memory writes here. The tp of the listening
 * socket contains all the necessary default parameters.
*/
struct sock *tcp_create_openreq_child(const struct sock *sk,
struct request_sock *req,
struct sk_buff *skb)
{
struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
const struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_connection_sock *newicsk;
const struct tcp_sock *oldtp;
struct tcp_sock *newtp;
u32 seq;
if (!newsk)
return NULL;
newicsk = inet_csk(newsk);
newtp = tcp_sk(newsk);
oldtp = tcp_sk(sk);
smc_check_reset_syn_req(oldtp, req, newtp);
/* Now setup tcp_sock */
newtp->pred_flags = 0;
seq = treq->rcv_isn + 1;
newtp->rcv_wup = seq;
WRITE_ONCE(newtp->copied_seq, seq);
WRITE_ONCE(newtp->rcv_nxt, seq);
newtp->segs_in = 1;
seq = treq->snt_isn + 1;
newtp->snd_sml = newtp->snd_una = seq;
WRITE_ONCE(newtp->snd_nxt, seq);
newtp->snd_up = seq;
INIT_LIST_HEAD(&newtp->tsq_node);
INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
tcp_init_wl(newtp, treq->rcv_isn);
minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
newtp->lsndtime = tcp_jiffies32;
newsk->sk_txhash = READ_ONCE(treq->txhash);
newtp->total_retrans = req->num_retrans;
tcp_init_xmit_timers(newsk);
WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
if (sock_flag(newsk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
newtp->rx_opt.sack_ok = ireq->sack_ok;
newtp->window_clamp = req->rsk_window_clamp;
newtp->rcv_ssthresh = req->rsk_rcv_wnd;
newtp->rcv_wnd = req->rsk_rcv_wnd;
newtp->rx_opt.wscale_ok = ireq->wscale_ok;
if (newtp->rx_opt.wscale_ok) {
newtp->rx_opt.snd_wscale = ireq->snd_wscale;
newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
} else {
newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
newtp->window_clamp = min(newtp->window_clamp, 65535U);
}
newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
newtp->max_window = newtp->snd_wnd;
if (newtp->rx_opt.tstamp_ok) {
newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
} else {
newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr);
}
if (req->num_timeout) {
newtp->undo_marker = treq->snt_isn;
newtp->retrans_stamp = div_u64(treq->snt_synack,
USEC_PER_SEC / TCP_TS_HZ);
}
newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
newtp->md5sig_info = NULL; /*XXX*/
#endif
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_req = NULL;
RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
newtp->bpf_chg_cc_inprogress = 0;
tcp_bpf_clone(sk, newsk);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
/*
* Process an incoming packet for SYN_RECV sockets represented as a
* request_sock. Normally sk is the listener socket but for TFO it
* points to the child socket.
*
 * XXX (TFO) - The current impl contains a special check for ack
 * validation here and inside tcp_v4_reqsk_send_ack(). Can we do better?
*
* We don't need to initialize tmp_opt.sack_ok as we don't use the results
*
* Note: If @fastopen is true, this can be called from process context.
* Otherwise, this is from BH context.
*/
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
bool fastopen, bool *req_stolen)
{
struct tcp_options_received tmp_opt;
struct sock *child;
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
bool paws_reject = false;
bool own_req;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
if (tmp_opt.rcv_tsecr)
tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not required;
			 * it can be estimated (approximately)
			 * from other data.
*/
tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
/* Check for pure retransmitted SYN. */
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
flg == TCP_FLAG_SYN &&
!paws_reject) {
/*
* RFC793 draws (Incorrectly! It was fixed in RFC1122)
* this case on figure 6 and figure 8, but formal
* protocol description says NOTHING.
* To be more exact, it says that we should send ACK,
* because this segment (at least, if it has no data)
* is out of window.
*
* CONCLUSION: RFC793 (even with RFC1122) DOES NOT
* describe SYN-RECV state. All the description
		 * is wrong; we cannot trust it and should
* rely only on common sense and implementation
* experience.
*
* Enforce "SYN-ACK" according to figure 8, figure 6
* of RFC793, fixed by RFC1122.
*
* Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
*
* Reset timer after retransmitting SYNACK, similar to
* the idea of fast retransmit in recovery.
*/
if (!tcp_oow_rate_limited(sock_net(sk), skb,
LINUX_MIB_TCPACKSKIPPEDSYNRECV,
&tcp_rsk(req)->last_oow_ack_time) &&
!inet_rtx_syn_ack(sk, req)) {
unsigned long expires = jiffies;
expires += reqsk_timeout(req, TCP_RTO_MAX);
if (!fastopen)
mod_timer_pending(&req->rsk_timer, expires);
else
req->rsk_timer.expires = expires;
}
return NULL;
}
	/* The following further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: it fails only
	   when SYNs are crossed.
You would think that SYN crossing is impossible here, since
we should have a SYN_SENT socket (from connect()) on our end,
but this is not true if the crossed SYNs were sent to both
ends by a malicious third party. We must defend against this,
and to do that we first verify the ACK (as per RFC793, page
36) and reset if it is invalid. Is this a true full defense?
To convince ourselves, let us consider a way in which the ACK
test can still pass in this 'malicious crossed SYNs' case.
Malicious sender sends identical SYNs (and thus identical sequence
numbers) to both A and B:
A: gets SYN, seq=7
B: gets SYN, seq=7
By our good fortune, both A and B select the same initial
send sequence number of seven :-)
A: sends SYN|ACK, seq=7, ack_seq=8
B: sends SYN|ACK, seq=7, ack_seq=8
So we are now A eating this SYN|ACK, ACK test passes. So
does sequence test, SYN is truncated, and thus we consider
it a bare ACK.
If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
bare ACK. Otherwise, we create an established connection. Both
ends (listening sockets) accept the new incoming connection and try
to talk to each other. 8-)
	   Note: This case is both harmless and rare. The probability is about
	   the same as us discovering intelligent life on another planet tomorrow.
	   But generally, we should (RFC lies!) accept the ACK
	   of the SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.
Note that the case is absolutely generic:
we cannot optimize anything here without
violating protocol. All the checks must be made
before attempt to create socket.
*/
/* RFC793 page 36: "If the connection is in any non-synchronized state ...
* and the incoming segment acknowledges something not yet
* sent (the segment carries an unacceptable ACK) ...
* a reset is sent."
*
* Invalid ACK: reset will be sent by listening socket.
* Note that the ACK validity check for a Fast Open socket is done
* elsewhere and is checked directly against the child socket rather
* than req because user data may have been sent out.
*/
if ((flg & TCP_FLAG_ACK) && !fastopen &&
(TCP_SKB_CB(skb)->ack_seq !=
tcp_rsk(req)->snt_isn + 1))
return sk;
	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
*/
/* RFC793: "first check sequence number". */
if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
/* Out of window: send ACK and drop. */
if (!(flg & TCP_FLAG_RST) &&
!tcp_oow_rate_limited(sock_net(sk), skb,
LINUX_MIB_TCPACKSKIPPEDSYNRECV,
&tcp_rsk(req)->last_oow_ack_time))
req->rsk_ops->send_ack(sk, skb, req);
if (paws_reject)
NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
return NULL;
}
/* In sequence, PAWS is OK. */
	/* TODO: We probably should defer the ts_recent change until
* we take ownership of @req.
*/
if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
/* Truncate SYN, it is out of window starting
at tcp_rsk(req)->rcv_isn + 1. */
flg &= ~TCP_FLAG_SYN;
}
/* RFC793: "second check the RST bit" and
* "fourth, check the SYN bit"
*/
if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
goto embryonic_reset;
}
/* ACK sequence verified above, just make sure ACK is
* set. If ACK not set, just silently drop the packet.
*
* XXX (TFO) - if we ever allow "data after SYN", the
* following check needs to be removed.
*/
if (!(flg & TCP_FLAG_ACK))
return NULL;
/* For Fast Open no more processing is needed (sk is the
* child socket).
*/
if (fastopen)
return sk;
/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
inet_rsk(req)->acked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
return NULL;
}
/* OK, ACK is valid, create big socket and
* feed this segment to it. It will repeat all
* the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
*/
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
req, &own_req);
if (!child)
goto listen_overflow;
if (own_req && rsk_drop_req(req)) {
reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
return child;
}
sock_rps_save_rxhash(child, skb);
tcp_synack_rtt_meas(child, req);
*req_stolen = !own_req;
return inet_csk_complete_hashdance(sk, child, req, own_req);
listen_overflow:
if (sk != req->rsk_listener)
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
inet_rsk(req)->acked = 1;
return NULL;
}
embryonic_reset:
if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legitimate local connections.
*/
req->rsk_ops->send_reset(sk, skb);
} else if (fastopen) { /* received a valid RST pkt */
reqsk_fastopen_remove(sk, req, true);
tcp_reset(sk, skb);
}
if (!fastopen) {
bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
if (unlinked)
__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
*req_stolen = !unlinked;
}
return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
/*
* Queue segment on the new socket if the new socket is active,
* otherwise we just shortcircuit this and continue with
* the new socket.
*
* For the vast majority of cases child->sk_state will be TCP_SYN_RECV
* when entering. But other states are possible due to a race condition
* where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
* be created.
*/
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb)
__releases(&((child)->sk_lock.slock))
{
int ret = 0;
int state = child->sk_state;
/* record sk_napi_id and sk_rx_queue_mapping of child. */
sk_mark_napi_id_set(child, skb);
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
ret = tcp_rcv_state_process(child, skb);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent);
} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the listening
		 * socket no longer protects us.
*/
__sk_add_backlog(child, skb);
}
bh_unlock_sock(child);
sock_put(child);
return ret;
}
EXPORT_SYMBOL(tcp_child_process);
| linux-master | net/ipv4/tcp_minisocks.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/util_macros.h>
#include <net/inet_common.h>
#include <net/tls.h>
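/* Descriptive note (added): account a consumed skb the way tcp_recvmsg()
 * would - advance copied_seq by its length and let the receive-buffer and
 * window bookkeeping catch up.
 */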
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tcp;
int copied;
if (!skb || !skb->len || !sk_is_tcp(sk))
return;
if (skb_bpf_strparser(skb))
return;
tcp = tcp_sk(sk);
copied = tcp->copied_seq + skb->len;
WRITE_ONCE(tcp->copied_seq, copied);
tcp_rcv_space_adjust(sk);
__tcp_cleanup_rbuf(sk, skb->len);
}
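/* Descriptive note (added): transfer up to @apply_bytes from @msg onto
 * @psock's ingress queue, charging the receiving socket's memory accounting
 * for each byte moved.
 */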
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg, u32 apply_bytes, int flags)
{
bool apply = apply_bytes;
struct scatterlist *sge;
u32 size, copied = 0;
struct sk_msg *tmp;
int i, ret = 0;
tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
if (unlikely(!tmp))
return -ENOMEM;
lock_sock(sk);
tmp->sg.start = msg->sg.start;
i = msg->sg.start;
do {
sge = sk_msg_elem(msg, i);
size = (apply && apply_bytes < sge->length) ?
apply_bytes : sge->length;
if (!sk_wmem_schedule(sk, size)) {
if (!copied)
ret = -ENOMEM;
break;
}
sk_mem_charge(sk, size);
sk_msg_xfer(tmp, msg, i, size);
copied += size;
if (sge->length)
get_page(sk_msg_page(tmp, i));
sk_msg_iter_var_next(i);
tmp->sg.end = i;
if (apply) {
apply_bytes -= size;
if (!apply_bytes) {
if (sge->length)
sk_msg_iter_var_prev(i);
break;
}
}
} while (i != msg->sg.end);
if (!ret) {
msg->sg.start = i;
sk_psock_queue_msg(psock, tmp);
sk_psock_data_ready(sk, psock);
} else {
sk_msg_free(sk, tmp);
kfree(tmp);
}
release_sock(sk);
return ret;
}
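/* Descriptive note (added): push up to @apply_bytes from @msg out through
 * the normal TCP send path using MSG_SPLICE_PAGES, advancing the
 * scatterlist as data is accepted.
 */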
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
int flags, bool uncharge)
{
struct msghdr msghdr = {};
bool apply = apply_bytes;
struct scatterlist *sge;
struct page *page;
int size, ret = 0;
u32 off;
while (1) {
struct bio_vec bvec;
bool has_tx_ulp;
sge = sk_msg_elem(msg, msg->sg.start);
size = (apply && apply_bytes < sge->length) ?
apply_bytes : sge->length;
off = sge->offset;
page = sg_page(sge);
tcp_rate_check_app_limited(sk);
retry:
msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
has_tx_ulp = tls_sw_has_ctx_tx(sk);
if (has_tx_ulp)
msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;
if (size < sge->length && msg->sg.start != msg->sg.end)
msghdr.msg_flags |= MSG_MORE;
bvec_set_page(&bvec, page, size, off);
iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
ret = tcp_sendmsg_locked(sk, &msghdr, size);
if (ret <= 0)
return ret;
if (apply)
apply_bytes -= ret;
msg->sg.size -= ret;
sge->offset += ret;
sge->length -= ret;
if (uncharge)
sk_mem_uncharge(sk, ret);
if (ret != size) {
size -= ret;
off += ret;
goto retry;
}
if (!sge->length) {
put_page(page);
sk_msg_iter_next(msg, start);
sg_init_table(sge, 1);
if (msg->sg.start == msg->sg.end)
break;
}
if (apply && !apply_bytes)
break;
}
return 0;
}
static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
u32 apply_bytes, int flags, bool uncharge)
{
int ret;
lock_sock(sk);
ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
release_sock(sk);
return ret;
}
int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
struct sk_msg *msg, u32 bytes, int flags)
{
struct sk_psock *psock = sk_psock_get(sk);
int ret;
if (unlikely(!psock))
return -EPIPE;
ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
tcp_bpf_push_locked(sk, msg, bytes, flags, false);
sk_psock_put(sk, psock);
return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
#ifdef CONFIG_BPF_SYSCALL
static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
long timeo)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int ret = 0;
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 1;
if (!timeo)
return ret;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
ret = sk_wait_event(sk, &timeo,
!list_empty(&psock->ingress_msg) ||
!skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
return ret;
}
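/* Descriptive note (added): a zero-length head element whose skb carries
 * TCPHDR_FIN means the next queued message represents only the peer's FIN.
 */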
static bool is_next_msg_fin(struct sk_psock *psock)
{
struct scatterlist *sge;
struct sk_msg *msg_rx;
int i;
msg_rx = sk_psock_peek_msg(psock);
i = msg_rx->sg.start;
sge = sk_msg_elem(msg_rx, i);
if (!sge->length) {
struct sk_buff *skb = msg_rx->skb;
if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
return true;
}
return false;
}
static int tcp_bpf_recvmsg_parser(struct sock *sk,
struct msghdr *msg,
size_t len,
int flags,
int *addr_len)
{
struct tcp_sock *tcp = tcp_sk(sk);
u32 seq = tcp->copied_seq;
struct sk_psock *psock;
int copied = 0;
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
if (!len)
return 0;
psock = sk_psock_get(sk);
if (unlikely(!psock))
return tcp_recvmsg(sk, msg, len, flags, addr_len);
lock_sock(sk);
/* We may have received data on the sk_receive_queue pre-accept and
* then we can not use read_skb in this context because we haven't
* assigned a sk_socket yet so have no link to the ops. The work-around
* is to check the sk_receive_queue and in these cases read skbs off
* queue again. The read_skb hook is not running at this point because
* of lock_sock so we avoid having multiple runners in read_skb.
*/
if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
tcp_data_ready(sk);
/* This handles the ENOMEM errors if we both receive data
		 * pre-accept and are already under memory pressure. At least
		 * let the user know to retry.
*/
if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
copied = -EAGAIN;
goto out;
}
}
msg_bytes_ready:
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	/* The typical case for EFAULT is a socket that was gracefully
	 * shut down with a FIN pkt; the other case checked here is
	 * some error from copy_page_to_iter, which would be unexpected.
	 * On FIN, correct the return code to zero.
*/
if (copied == -EFAULT) {
bool is_fin = is_next_msg_fin(psock);
if (is_fin) {
copied = 0;
seq++;
goto out;
}
}
seq += copied;
if (!copied) {
long timeo;
int data;
if (sock_flag(sk, SOCK_DONE))
goto out;
if (sk->sk_err) {
copied = sock_error(sk);
goto out;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
goto out;
if (sk->sk_state == TCP_CLOSE) {
copied = -ENOTCONN;
goto out;
}
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
if (!timeo) {
copied = -EAGAIN;
goto out;
}
if (signal_pending(current)) {
copied = sock_intr_errno(timeo);
goto out;
}
data = tcp_msg_wait_data(sk, psock, timeo);
if (data && !sk_psock_queue_empty(psock))
goto msg_bytes_ready;
copied = -EAGAIN;
}
out:
WRITE_ONCE(tcp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
if (copied > 0)
__tcp_cleanup_rbuf(sk, copied);
release_sock(sk);
sk_psock_put(sk, psock);
return copied;
}
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len)
{
struct sk_psock *psock;
int copied, ret;
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
if (!len)
return 0;
psock = sk_psock_get(sk);
if (unlikely(!psock))
return tcp_recvmsg(sk, msg, len, flags, addr_len);
if (!skb_queue_empty(&sk->sk_receive_queue) &&
sk_psock_queue_empty(psock)) {
sk_psock_put(sk, psock);
return tcp_recvmsg(sk, msg, len, flags, addr_len);
}
lock_sock(sk);
msg_bytes_ready:
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
if (!copied) {
long timeo;
int data;
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
data = tcp_msg_wait_data(sk, psock, timeo);
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
release_sock(sk);
sk_psock_put(sk, psock);
return tcp_recvmsg(sk, msg, len, flags, addr_len);
}
copied = -EAGAIN;
}
ret = copied;
release_sock(sk);
sk_psock_put(sk, psock);
return ret;
}
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg, int *copied, int flags)
{
bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
struct sock *sk_redir;
u32 tosend, origsize, sent, delta = 0;
u32 eval;
int ret;
more_data:
if (psock->eval == __SK_NONE) {
		/* Track the delta in msg size so it can be subtracted on SK_DROP
		 * from the copied size returned to the user. This ensures the user
		 * doesn't get a positive return code when msg_cut_data is combined
		 * with an SK_DROP verdict.
*/
delta = msg->sg.size;
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
delta -= msg->sg.size;
}
if (msg->cork_bytes &&
msg->cork_bytes > msg->sg.size && !enospc) {
psock->cork_bytes = msg->cork_bytes - msg->sg.size;
if (!psock->cork) {
psock->cork = kzalloc(sizeof(*psock->cork),
GFP_ATOMIC | __GFP_NOWARN);
if (!psock->cork)
return -ENOMEM;
}
memcpy(psock->cork, msg, sizeof(*msg));
return 0;
}
tosend = msg->sg.size;
if (psock->apply_bytes && psock->apply_bytes < tosend)
tosend = psock->apply_bytes;
eval = __SK_NONE;
switch (psock->eval) {
case __SK_PASS:
ret = tcp_bpf_push(sk, msg, tosend, flags, true);
if (unlikely(ret)) {
*copied -= sk_msg_free(sk, msg);
break;
}
sk_msg_apply_bytes(psock, tosend);
break;
case __SK_REDIRECT:
redir_ingress = psock->redir_ingress;
sk_redir = psock->sk_redir;
sk_msg_apply_bytes(psock, tosend);
if (!psock->apply_bytes) {
/* Clean up before releasing the sock lock. */
eval = psock->eval;
psock->eval = __SK_NONE;
psock->sk_redir = NULL;
}
if (psock->cork) {
cork = true;
psock->cork = NULL;
}
sk_msg_return(sk, msg, tosend);
release_sock(sk);
origsize = msg->sg.size;
ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
msg, tosend, flags);
sent = origsize - msg->sg.size;
if (eval == __SK_REDIRECT)
sock_put(sk_redir);
lock_sock(sk);
if (unlikely(ret < 0)) {
int free = sk_msg_free_nocharge(sk, msg);
if (!cork)
*copied -= free;
}
if (cork) {
sk_msg_free(sk, msg);
kfree(msg);
msg = NULL;
ret = 0;
}
break;
case __SK_DROP:
default:
sk_msg_free_partial(sk, msg, tosend);
sk_msg_apply_bytes(psock, tosend);
*copied -= (tosend + delta);
return -EACCES;
}
if (likely(!ret)) {
if (!psock->apply_bytes) {
psock->eval = __SK_NONE;
if (psock->sk_redir) {
sock_put(psock->sk_redir);
psock->sk_redir = NULL;
}
}
if (msg &&
msg->sg.data[msg->sg.start].page_link &&
msg->sg.data[msg->sg.start].length) {
if (eval == __SK_REDIRECT)
sk_mem_charge(sk, tosend - sent);
goto more_data;
}
}
return ret;
}
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct sk_msg tmp, *msg_tx = NULL;
int copied = 0, err = 0;
struct sk_psock *psock;
long timeo;
int flags;
/* Don't let internal flags through */
flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
flags |= MSG_NO_SHARED_FRAGS;
psock = sk_psock_get(sk);
if (unlikely(!psock))
return tcp_sendmsg(sk, msg, size);
lock_sock(sk);
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
while (msg_data_left(msg)) {
bool enospc = false;
u32 copy, osize;
if (sk->sk_err) {
err = -sk->sk_err;
goto out_err;
}
copy = msg_data_left(msg);
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
if (psock->cork) {
msg_tx = psock->cork;
} else {
msg_tx = &tmp;
sk_msg_init(msg_tx);
}
osize = msg_tx->sg.size;
err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
if (err) {
if (err != -ENOSPC)
goto wait_for_memory;
enospc = true;
copy = msg_tx->sg.size - osize;
}
err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
copy);
if (err < 0) {
sk_msg_trim(sk, msg_tx, osize);
goto out_err;
}
copied += copy;
if (psock->cork_bytes) {
if (size > psock->cork_bytes)
psock->cork_bytes = 0;
else
psock->cork_bytes -= size;
if (psock->cork_bytes && !enospc)
goto out_err;
/* All cork bytes are accounted, rerun the prog. */
psock->eval = __SK_NONE;
psock->cork_bytes = 0;
}
err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
if (unlikely(err < 0))
goto out_err;
continue;
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
err = sk_stream_wait_memory(sk, &timeo);
if (err) {
if (msg_tx && msg_tx != psock->cork)
sk_msg_free(sk, msg_tx);
goto out_err;
}
}
out_err:
if (err < 0)
err = sk_stream_error(sk, msg->msg_flags, err);
release_sock(sk);
sk_psock_put(sk, psock);
return copied ? copied : err;
}
enum {
TCP_BPF_IPV4,
TCP_BPF_IPV6,
TCP_BPF_NUM_PROTS,
};
enum {
TCP_BPF_BASE,
TCP_BPF_TX,
TCP_BPF_RX,
TCP_BPF_TXRX,
TCP_BPF_NUM_CFGS,
};
static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];
static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
struct proto *base)
{
prot[TCP_BPF_BASE] = *base;
prot[TCP_BPF_BASE].destroy = sock_map_destroy;
prot[TCP_BPF_BASE].close = sock_map_close;
prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;
prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
prot[TCP_BPF_RX] = prot[TCP_BPF_BASE];
prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser;
prot[TCP_BPF_TXRX] = prot[TCP_BPF_TX];
prot[TCP_BPF_TXRX].recvmsg = tcp_bpf_recvmsg_parser;
}
static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
spin_lock_bh(&tcpv6_prot_lock);
if (likely(ops != tcpv6_prot_saved)) {
tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
smp_store_release(&tcpv6_prot_saved, ops);
}
spin_unlock_bh(&tcpv6_prot_lock);
}
}
static int __init tcp_bpf_v4_build_proto(void)
{
tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
return 0;
}
late_initcall(tcp_bpf_v4_build_proto);
static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
/* In order to avoid retpoline, we make assumptions when we call
* into ops if e.g. a psock is not present. Make sure they are
* indeed valid assumptions.
*/
return ops->recvmsg == tcp_recvmsg &&
ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP;
}
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX;
}
if (restore) {
if (inet_csk_has_ulp(sk)) {
/* TLS does not have an unhash proto in SW cases,
* but we need to ensure we stop using the sock_map
* unhash routine because the associated psock is being
* removed. So use the original unhash handler.
*/
WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
} else {
sk->sk_write_space = psock->saved_write_space;
/* Pairs with lockless read in sk_clone_lock() */
sock_replace_proto(sk, psock->sk_proto);
}
return 0;
}
if (sk->sk_family == AF_INET6) {
if (tcp_bpf_assert_proto_ops(psock->sk_proto))
return -EINVAL;
tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
}
/* Pairs with lockless read in sk_clone_lock() */
sock_replace_proto(sk, &tcp_bpf_prots[family][config]);
return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);
/* If a child got cloned from a listening socket that had tcp_bpf
* protocol callbacks installed, we need to restore the callbacks to
* the default ones because the child does not inherit the psock state
* that tcp_bpf callbacks expect.
*/
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
struct proto *prot = newsk->sk_prot;
if (is_insidevar(prot, tcp_bpf_prots))
newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */
| linux-master | net/ipv4/tcp_bpf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IPv4 Forwarding Information Base: semantics.
*
* Authors: Alexey Kuznetsov, <[email protected]>
*/
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/netlink.h>
#include <linux/hash.h>
#include <linux/nospec.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>
#include <net/addrconf.h>
#include "fib_lookup.h"
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_hash_bits;
static unsigned int fib_info_cnt;
#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
/* for_nexthops and change_nexthops are only used when a nexthop object
* is not set in a fib_info. The logic within can reference fib_nh.
*/
#ifdef CONFIG_IP_ROUTE_MULTIPATH
#define for_nexthops(fi) { \
int nhsel; const struct fib_nh *nh; \
for (nhsel = 0, nh = (fi)->fib_nh; \
nhsel < fib_info_num_path((fi)); \
nh++, nhsel++)
#define change_nexthops(fi) { \
int nhsel; struct fib_nh *nexthop_nh; \
for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
nhsel < fib_info_num_path((fi)); \
nexthop_nh++, nhsel++)
#else /* CONFIG_IP_ROUTE_MULTIPATH */
/* Hope that gcc will optimize this to get rid of the dummy loop */
#define for_nexthops(fi) { \
int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
for (nhsel = 0; nhsel < 1; nhsel++)
#define change_nexthops(fi) { \
int nhsel; \
struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
for (nhsel = 0; nhsel < 1; nhsel++)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
#define endfor_nexthops(fi) }
const struct fib_prop fib_props[RTN_MAX + 1] = {
[RTN_UNSPEC] = {
.error = 0,
.scope = RT_SCOPE_NOWHERE,
},
[RTN_UNICAST] = {
.error = 0,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_LOCAL] = {
.error = 0,
.scope = RT_SCOPE_HOST,
},
[RTN_BROADCAST] = {
.error = 0,
.scope = RT_SCOPE_LINK,
},
[RTN_ANYCAST] = {
.error = 0,
.scope = RT_SCOPE_LINK,
},
[RTN_MULTICAST] = {
.error = 0,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_BLACKHOLE] = {
.error = -EINVAL,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_UNREACHABLE] = {
.error = -EHOSTUNREACH,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_PROHIBIT] = {
.error = -EACCES,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_THROW] = {
.error = -EAGAIN,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_NAT] = {
.error = -EINVAL,
.scope = RT_SCOPE_NOWHERE,
},
[RTN_XRESOLVE] = {
.error = -EINVAL,
.scope = RT_SCOPE_NOWHERE,
},
};
static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
struct rtable *rt = rcu_dereference_protected(*rtp, 1);
if (!rt)
return;
/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
* because we waited an RCU grace period before calling
* free_fib_info_rcu()
*/
dst_dev_put(&rt->dst);
dst_release_immediate(&rt->dst);
}
static void free_nh_exceptions(struct fib_nh_common *nhc)
{
struct fnhe_hash_bucket *hash;
int i;
hash = rcu_dereference_protected(nhc->nhc_exceptions, 1);
if (!hash)
return;
for (i = 0; i < FNHE_HASH_SIZE; i++) {
struct fib_nh_exception *fnhe;
fnhe = rcu_dereference_protected(hash[i].chain, 1);
while (fnhe) {
struct fib_nh_exception *next;
next = rcu_dereference_protected(fnhe->fnhe_next, 1);
rt_fibinfo_free(&fnhe->fnhe_rth_input);
rt_fibinfo_free(&fnhe->fnhe_rth_output);
kfree(fnhe);
fnhe = next;
}
}
kfree(hash);
}
static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
int cpu;
if (!rtp)
return;
for_each_possible_cpu(cpu) {
struct rtable *rt;
rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
if (rt) {
dst_dev_put(&rt->dst);
dst_release_immediate(&rt->dst);
}
}
free_percpu(rtp);
}
void fib_nh_common_release(struct fib_nh_common *nhc)
{
netdev_put(nhc->nhc_dev, &nhc->nhc_dev_tracker);
lwtstate_put(nhc->nhc_lwtstate);
rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output);
rt_fibinfo_free(&nhc->nhc_rth_input);
free_nh_exceptions(nhc);
}
EXPORT_SYMBOL_GPL(fib_nh_common_release);
void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
if (fib_nh->nh_tclassid)
atomic_dec(&net->ipv4.fib_num_tclassid_users);
#endif
fib_nh_common_release(&fib_nh->nh_common);
}
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
if (fi->nh) {
nexthop_put(fi->nh);
} else {
change_nexthops(fi) {
fib_nh_release(fi->fib_net, nexthop_nh);
} endfor_nexthops(fi);
}
ip_fib_metrics_put(fi->fib_metrics);
kfree(fi);
}
void free_fib_info(struct fib_info *fi)
{
if (fi->fib_dead == 0) {
pr_warn("Freeing alive fib_info %p\n", fi);
return;
}
call_rcu(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);
void fib_release_info(struct fib_info *fi)
{
spin_lock_bh(&fib_info_lock);
if (fi && refcount_dec_and_test(&fi->fib_treeref)) {
hlist_del(&fi->fib_hash);
/* Paired with READ_ONCE() in fib_create_info(). */
WRITE_ONCE(fib_info_cnt, fib_info_cnt - 1);
if (fi->fib_prefsrc)
hlist_del(&fi->fib_lhash);
if (fi->nh) {
list_del(&fi->nh_list);
} else {
change_nexthops(fi) {
if (!nexthop_nh->fib_nh_dev)
continue;
hlist_del(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
}
/* Paired with READ_ONCE() from fib_table_lookup() */
WRITE_ONCE(fi->fib_dead, 1);
fib_info_put(fi);
}
spin_unlock_bh(&fib_info_lock);
}
static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi)
{
const struct fib_nh *onh;
if (fi->nh || ofi->nh)
return nexthop_cmp(fi->nh, ofi->nh) ? 0 : -1;
if (ofi->fib_nhs == 0)
return 0;
for_nexthops(fi) {
onh = fib_info_nh(ofi, nhsel);
if (nh->fib_nh_oif != onh->fib_nh_oif ||
nh->fib_nh_gw_family != onh->fib_nh_gw_family ||
nh->fib_nh_scope != onh->fib_nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->fib_nh_weight != onh->fib_nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid != onh->nh_tclassid ||
#endif
lwtunnel_cmp_encap(nh->fib_nh_lws, onh->fib_nh_lws) ||
((nh->fib_nh_flags ^ onh->fib_nh_flags) & ~RTNH_COMPARE_MASK))
return -1;
if (nh->fib_nh_gw_family == AF_INET &&
nh->fib_nh_gw4 != onh->fib_nh_gw4)
return -1;
if (nh->fib_nh_gw_family == AF_INET6 &&
ipv6_addr_cmp(&nh->fib_nh_gw6, &onh->fib_nh_gw6))
return -1;
} endfor_nexthops(fi);
return 0;
}
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
return hash_32(val, DEVINDEX_HASHBITS);
}
static struct hlist_head *
fib_info_devhash_bucket(const struct net_device *dev)
{
u32 val = net_hash_mix(dev_net(dev)) ^ dev->ifindex;
return &fib_info_devhash[fib_devindex_hashfn(val)];
}
static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope,
u32 prefsrc, u32 priority)
{
unsigned int val = init_val;
val ^= (protocol << 8) | scope;
val ^= prefsrc;
val ^= priority;
return val;
}
static unsigned int fib_info_hashfn_result(unsigned int val)
{
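	/* Descriptive note (added): fold higher bits into the low ones before
	 * masking so the bucket choice does not depend only on the low bits
	 * of the mixed value.
	 */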
unsigned int mask = (fib_info_hash_size - 1);
return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
static inline unsigned int fib_info_hashfn(struct fib_info *fi)
{
unsigned int val;
val = fib_info_hashfn_1(fi->fib_nhs, fi->fib_protocol,
fi->fib_scope, (__force u32)fi->fib_prefsrc,
fi->fib_priority);
if (fi->nh) {
val ^= fib_devindex_hashfn(fi->nh->id);
} else {
for_nexthops(fi) {
val ^= fib_devindex_hashfn(nh->fib_nh_oif);
} endfor_nexthops(fi)
}
return fib_info_hashfn_result(val);
}
/* no metrics, only nexthop id */
static struct fib_info *fib_find_info_nh(struct net *net,
const struct fib_config *cfg)
{
struct hlist_head *head;
struct fib_info *fi;
unsigned int hash;
hash = fib_info_hashfn_1(fib_devindex_hashfn(cfg->fc_nh_id),
cfg->fc_protocol, cfg->fc_scope,
(__force u32)cfg->fc_prefsrc,
cfg->fc_priority);
hash = fib_info_hashfn_result(hash);
head = &fib_info_hash[hash];
hlist_for_each_entry(fi, head, fib_hash) {
if (!net_eq(fi->fib_net, net))
continue;
if (!fi->nh || fi->nh->id != cfg->fc_nh_id)
continue;
if (cfg->fc_protocol == fi->fib_protocol &&
cfg->fc_scope == fi->fib_scope &&
cfg->fc_prefsrc == fi->fib_prefsrc &&
cfg->fc_priority == fi->fib_priority &&
cfg->fc_type == fi->fib_type &&
cfg->fc_table == fi->fib_tb_id &&
!((cfg->fc_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK))
return fi;
}
return NULL;
}
static struct fib_info *fib_find_info(struct fib_info *nfi)
{
struct hlist_head *head;
struct fib_info *fi;
unsigned int hash;
hash = fib_info_hashfn(nfi);
head = &fib_info_hash[hash];
hlist_for_each_entry(fi, head, fib_hash) {
if (!net_eq(fi->fib_net, nfi->fib_net))
continue;
if (fi->fib_nhs != nfi->fib_nhs)
continue;
if (nfi->fib_protocol == fi->fib_protocol &&
nfi->fib_scope == fi->fib_scope &&
nfi->fib_prefsrc == fi->fib_prefsrc &&
nfi->fib_priority == fi->fib_priority &&
nfi->fib_type == fi->fib_type &&
nfi->fib_tb_id == fi->fib_tb_id &&
memcmp(nfi->fib_metrics, fi->fib_metrics,
sizeof(u32) * RTAX_MAX) == 0 &&
!((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
nh_comp(fi, nfi) == 0)
return fi;
}
return NULL;
}
/* Check that the gateway is already configured.
 * Used only by the redirect-accept routine.
*/
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
struct hlist_head *head;
struct fib_nh *nh;
spin_lock(&fib_info_lock);
head = fib_info_devhash_bucket(dev);
hlist_for_each_entry(nh, head, nh_hash) {
if (nh->fib_nh_dev == dev &&
nh->fib_nh_gw4 == gw &&
!(nh->fib_nh_flags & RTNH_F_DEAD)) {
spin_unlock(&fib_info_lock);
return 0;
}
}
spin_unlock(&fib_info_lock);
return -1;
}
size_t fib_nlmsg_size(struct fib_info *fi)
{
size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(4) /* RTA_TABLE */
+ nla_total_size(4) /* RTA_DST */
+ nla_total_size(4) /* RTA_PRIORITY */
+ nla_total_size(4) /* RTA_PREFSRC */
+ nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
unsigned int nhs = fib_info_num_path(fi);
/* space for nested metrics */
payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
if (fi->nh)
payload += nla_total_size(4); /* RTA_NH_ID */
if (nhs) {
size_t nh_encapsize = 0;
/* Also handles the special case nhs == 1 */
/* each nexthop is packed in an attribute */
size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
unsigned int i;
/* may contain flow and gateway attribute */
nhsize += 2 * nla_total_size(4);
/* grab encap info */
for (i = 0; i < fib_info_num_path(fi); i++) {
struct fib_nh_common *nhc = fib_info_nhc(fi, i);
if (nhc->nhc_lwtstate) {
/* RTA_ENCAP_TYPE */
nh_encapsize += lwtunnel_get_encap_size(
nhc->nhc_lwtstate);
/* RTA_ENCAP */
nh_encapsize += nla_total_size(2);
}
}
/* all nexthops are packed in a nested attribute */
payload += nla_total_size((nhs * nhsize) + nh_encapsize);
}
return payload;
}
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
int dst_len, u32 tb_id, const struct nl_info *info,
unsigned int nlm_flags)
{
struct fib_rt_info fri;
struct sk_buff *skb;
u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
int err = -ENOBUFS;
skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
if (!skb)
goto errout;
fri.fi = fa->fa_info;
fri.tb_id = tb_id;
fri.dst = key;
fri.dst_len = dst_len;
fri.dscp = fa->fa_dscp;
fri.type = fa->fa_type;
fri.offload = READ_ONCE(fa->offload);
fri.trap = READ_ONCE(fa->trap);
fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
info->nlh, GFP_KERNEL);
return;
errout:
if (err < 0)
rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
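/* Descriptive note (added): heuristically decide whether a default route's
 * gateway is dead by looking at its neighbour entry, remembering the best
 * fallback seen so far in *last_resort / *last_idx.
 */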
static int fib_detect_death(struct fib_info *fi, int order,
struct fib_info **last_resort, int *last_idx,
int dflt)
{
const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
struct neighbour *n;
int state = NUD_NONE;
if (likely(nhc->nhc_gw_family == AF_INET))
n = neigh_lookup(&arp_tbl, &nhc->nhc_gw.ipv4, nhc->nhc_dev);
else if (nhc->nhc_gw_family == AF_INET6)
n = neigh_lookup(ipv6_stub->nd_tbl, &nhc->nhc_gw.ipv6,
nhc->nhc_dev);
else
n = NULL;
if (n) {
state = READ_ONCE(n->nud_state);
neigh_release(n);
} else {
return 0;
}
if (state == NUD_REACHABLE)
return 0;
if ((state & NUD_VALID) && order != dflt)
return 0;
if ((state & NUD_VALID) ||
(*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
*last_resort = fi;
*last_idx = order;
}
return 1;
}
int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
struct nlattr *encap, u16 encap_type,
void *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack)
{
int err;
nhc->nhc_pcpu_rth_output = alloc_percpu_gfp(struct rtable __rcu *,
gfp_flags);
if (!nhc->nhc_pcpu_rth_output)
return -ENOMEM;
if (encap) {
struct lwtunnel_state *lwtstate;
if (encap_type == LWTUNNEL_ENCAP_NONE) {
NL_SET_ERR_MSG(extack, "LWT encap type not specified");
err = -EINVAL;
goto lwt_failure;
}
err = lwtunnel_build_state(net, encap_type, encap,
nhc->nhc_family, cfg, &lwtstate,
extack);
if (err)
goto lwt_failure;
nhc->nhc_lwtstate = lwtstate_get(lwtstate);
}
return 0;
lwt_failure:
rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output);
nhc->nhc_pcpu_rth_output = NULL;
return err;
}
EXPORT_SYMBOL_GPL(fib_nh_common_init);
int fib_nh_init(struct net *net, struct fib_nh *nh,
struct fib_config *cfg, int nh_weight,
struct netlink_ext_ack *extack)
{
int err;
nh->fib_nh_family = AF_INET;
err = fib_nh_common_init(net, &nh->nh_common, cfg->fc_encap,
cfg->fc_encap_type, cfg, GFP_KERNEL, extack);
if (err)
return err;
nh->fib_nh_oif = cfg->fc_oif;
nh->fib_nh_gw_family = cfg->fc_gw_family;
if (cfg->fc_gw_family == AF_INET)
nh->fib_nh_gw4 = cfg->fc_gw4;
else if (cfg->fc_gw_family == AF_INET6)
nh->fib_nh_gw6 = cfg->fc_gw6;
nh->fib_nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
if (nh->nh_tclassid)
atomic_inc(&net->ipv4.fib_num_tclassid_users);
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->fib_nh_weight = nh_weight;
#endif
return 0;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
struct netlink_ext_ack *extack)
{
int nhs = 0;
while (rtnh_ok(rtnh, remaining)) {
nhs++;
rtnh = rtnh_next(rtnh, &remaining);
}
/* leftover implies invalid nexthop configuration, discard it */
if (remaining > 0) {
NL_SET_ERR_MSG(extack,
"Invalid nexthop configuration - extra data after nexthops");
nhs = 0;
}
return nhs;
}
static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
if (nla_len(nla) < sizeof(*gw)) {
NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
return -EINVAL;
}
*gw = nla_get_in_addr(nla);
return 0;
}
/* only called when fib_nh is integrated into fib_info */
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
struct net *net = fi->fib_net;
struct fib_config fib_cfg;
struct fib_nh *nh;
int ret;
change_nexthops(fi) {
int attrlen;
memset(&fib_cfg, 0, sizeof(fib_cfg));
if (!rtnh_ok(rtnh, remaining)) {
NL_SET_ERR_MSG(extack,
"Invalid nexthop configuration - extra data after nexthop");
return -EINVAL;
}
if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
NL_SET_ERR_MSG(extack,
"Invalid flags for nexthop - can not contain DEAD or LINKDOWN");
return -EINVAL;
}
fib_cfg.fc_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
fib_cfg.fc_oif = rtnh->rtnh_ifindex;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nlav = nla_find(attrs, attrlen, RTA_VIA);
if (nla && nlav) {
NL_SET_ERR_MSG(extack,
"Nexthop configuration can not contain both GATEWAY and VIA");
return -EINVAL;
}
if (nla) {
ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
extack);
if (ret)
goto errout;
if (fib_cfg.fc_gw4)
fib_cfg.fc_gw_family = AF_INET;
} else if (nlav) {
ret = fib_gw_from_via(&fib_cfg, nlav, extack);
if (ret)
goto errout;
}
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla) {
if (nla_len(nla) < sizeof(u32)) {
NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
return -EINVAL;
}
fib_cfg.fc_flow = nla_get_u32(nla);
}
fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
/* RTA_ENCAP_TYPE length checked in
* lwtunnel_valid_encap_type_attr
*/
nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla)
fib_cfg.fc_encap_type = nla_get_u16(nla);
}
ret = fib_nh_init(net, nexthop_nh, &fib_cfg,
rtnh->rtnh_hops + 1, extack);
if (ret)
goto errout;
rtnh = rtnh_next(rtnh, &remaining);
} endfor_nexthops(fi);
ret = -EINVAL;
nh = fib_info_nh(fi, 0);
if (cfg->fc_oif && nh->fib_nh_oif != cfg->fc_oif) {
NL_SET_ERR_MSG(extack,
"Nexthop device index does not match RTA_OIF");
goto errout;
}
if (cfg->fc_gw_family) {
if (cfg->fc_gw_family != nh->fib_nh_gw_family ||
(cfg->fc_gw_family == AF_INET &&
nh->fib_nh_gw4 != cfg->fc_gw4) ||
(cfg->fc_gw_family == AF_INET6 &&
ipv6_addr_cmp(&nh->fib_nh_gw6, &cfg->fc_gw6))) {
NL_SET_ERR_MSG(extack,
"Nexthop gateway does not match RTA_GATEWAY or RTA_VIA");
goto errout;
}
}
#ifdef CONFIG_IP_ROUTE_CLASSID
if (cfg->fc_flow && nh->nh_tclassid != cfg->fc_flow) {
NL_SET_ERR_MSG(extack,
"Nexthop class id does not match RTA_FLOW");
goto errout;
}
#endif
ret = 0;
errout:
return ret;
}
/* only called when fib_nh is integrated into fib_info */
static void fib_rebalance(struct fib_info *fi)
{
int total;
int w;
if (fib_info_num_path(fi) < 2)
return;
total = 0;
for_nexthops(fi) {
if (nh->fib_nh_flags & RTNH_F_DEAD)
continue;
if (ip_ignore_linkdown(nh->fib_nh_dev) &&
nh->fib_nh_flags & RTNH_F_LINKDOWN)
continue;
total += nh->fib_nh_weight;
} endfor_nexthops(fi);
w = 0;
change_nexthops(fi) {
int upper_bound;
if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD) {
upper_bound = -1;
} else if (ip_ignore_linkdown(nexthop_nh->fib_nh_dev) &&
nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
upper_bound = -1;
} else {
w += nexthop_nh->fib_nh_weight;
upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
total) - 1;
}
atomic_set(&nexthop_nh->fib_nh_upper_bound, upper_bound);
} endfor_nexthops(fi);
}
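/* A worked example of the weight -> upper bound mapping above (the
 * weights are hypothetical): two usable nexthops with fib_nh_weight 1
 * and 3 give total = 4.  The running sum w is 1 and then 4, so
 *
 *	nh0: DIV_ROUND_CLOSEST_ULL(1ULL << 31, 4) - 1 =  536870911
 *	nh1: DIV_ROUND_CLOSEST_ULL(4ULL << 31, 4) - 1 = 2147483647
 *
 * fib_select_multipath() later picks the first nexthop whose upper
 * bound is >= the 31-bit flow hash, i.e. roughly 25% of flows land on
 * nh0 and 75% on nh1.  Dead or link-down nexthops keep an upper bound
 * of -1 and are therefore never selected.
 */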
#else /* CONFIG_IP_ROUTE_MULTIPATH */
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
NL_SET_ERR_MSG(extack, "Multipath support not enabled in kernel");
return -EINVAL;
}
#define fib_rebalance(fi) do { } while (0)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int fib_encap_match(struct net *net, u16 encap_type,
struct nlattr *encap,
const struct fib_nh *nh,
const struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
struct lwtunnel_state *lwtstate;
int ret, result = 0;
if (encap_type == LWTUNNEL_ENCAP_NONE)
return 0;
ret = lwtunnel_build_state(net, encap_type, encap, AF_INET,
cfg, &lwtstate, extack);
if (!ret) {
result = lwtunnel_cmp_encap(lwtstate, nh->fib_nh_lws);
lwtstate_free(lwtstate);
}
return result;
}
int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
struct netlink_ext_ack *extack)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct rtnexthop *rtnh;
int remaining;
#endif
if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
return 1;
if (cfg->fc_nh_id) {
if (fi->nh && cfg->fc_nh_id == fi->nh->id)
return 0;
return 1;
}
if (fi->nh) {
if (cfg->fc_oif || cfg->fc_gw_family || cfg->fc_mp)
return 1;
return 0;
}
if (cfg->fc_oif || cfg->fc_gw_family) {
struct fib_nh *nh;
nh = fib_info_nh(fi, 0);
if (cfg->fc_encap) {
if (fib_encap_match(net, cfg->fc_encap_type,
cfg->fc_encap, nh, cfg, extack))
return 1;
}
#ifdef CONFIG_IP_ROUTE_CLASSID
if (cfg->fc_flow &&
cfg->fc_flow != nh->nh_tclassid)
return 1;
#endif
if ((cfg->fc_oif && cfg->fc_oif != nh->fib_nh_oif) ||
(cfg->fc_gw_family &&
cfg->fc_gw_family != nh->fib_nh_gw_family))
return 1;
if (cfg->fc_gw_family == AF_INET &&
cfg->fc_gw4 != nh->fib_nh_gw4)
return 1;
if (cfg->fc_gw_family == AF_INET6 &&
ipv6_addr_cmp(&cfg->fc_gw6, &nh->fib_nh_gw6))
return 1;
return 0;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (!cfg->fc_mp)
return 0;
rtnh = cfg->fc_mp;
remaining = cfg->fc_mp_len;
for_nexthops(fi) {
int attrlen;
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->fib_nh_oif)
return 1;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
int err;
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nlav = nla_find(attrs, attrlen, RTA_VIA);
if (nla && nlav) {
NL_SET_ERR_MSG(extack,
"Nexthop configuration can not contain both GATEWAY and VIA");
return -EINVAL;
}
if (nla) {
__be32 gw;
err = fib_gw_from_attr(&gw, nla, extack);
if (err)
return err;
if (nh->fib_nh_gw_family != AF_INET ||
gw != nh->fib_nh_gw4)
return 1;
} else if (nlav) {
struct fib_config cfg2;
err = fib_gw_from_via(&cfg2, nlav, extack);
if (err)
return err;
switch (nh->fib_nh_gw_family) {
case AF_INET:
if (cfg2.fc_gw_family != AF_INET ||
cfg2.fc_gw4 != nh->fib_nh_gw4)
return 1;
break;
case AF_INET6:
if (cfg2.fc_gw_family != AF_INET6 ||
ipv6_addr_cmp(&cfg2.fc_gw6,
&nh->fib_nh_gw6))
return 1;
break;
}
}
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla) {
if (nla_len(nla) < sizeof(u32)) {
NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
return -EINVAL;
}
if (nla_get_u32(nla) != nh->nh_tclassid)
return 1;
}
#endif
}
rtnh = rtnh_next(rtnh, &remaining);
} endfor_nexthops(fi);
#endif
return 0;
}
bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
{
struct nlattr *nla;
int remaining;
if (!cfg->fc_mx)
return true;
nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
int type = nla_type(nla);
u32 fi_val, val;
if (!type)
continue;
if (type > RTAX_MAX)
return false;
type = array_index_nospec(type, RTAX_MAX + 1);
if (type == RTAX_CC_ALGO) {
char tmp[TCP_CA_NAME_MAX];
bool ecn_ca = false;
nla_strscpy(tmp, nla, sizeof(tmp));
val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
} else {
if (nla_len(nla) != sizeof(u32))
return false;
val = nla_get_u32(nla);
}
fi_val = fi->fib_metrics->metrics[type - 1];
if (type == RTAX_FEATURES)
fi_val &= ~DST_FEATURE_ECN_CA;
if (fi_val != val)
return false;
}
return true;
}
static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh,
u32 table, struct netlink_ext_ack *extack)
{
struct fib6_config cfg = {
.fc_table = table,
.fc_flags = nh->fib_nh_flags | RTF_GATEWAY,
.fc_ifindex = nh->fib_nh_oif,
.fc_gateway = nh->fib_nh_gw6,
};
struct fib6_nh fib6_nh = {};
int err;
err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack);
if (!err) {
nh->fib_nh_dev = fib6_nh.fib_nh_dev;
netdev_hold(nh->fib_nh_dev, &nh->fib_nh_dev_tracker,
GFP_KERNEL);
nh->fib_nh_oif = nh->fib_nh_dev->ifindex;
nh->fib_nh_scope = RT_SCOPE_LINK;
ipv6_stub->fib6_nh_release(&fib6_nh);
}
return err;
}
/*
 * Picture
 * -------
 *
 * The semantics of nexthops are very messy for historical reasons.
 * We have to take into account that:
 * a) a gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) a gateway must be an on-link address, possibly
 *    described not by an ifaddr but by a direct route.
 * c) if both a gateway and an interface are specified, they must not
 *    contradict each other.
 * d) if we use tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the amount of code barely
 * increases while the result becomes much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have a narrower scope. This recursion stops
 * when gw has LOCAL scope or when the "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. For example, as a by-product it allows
 * independent exterior and interior routing processes to coexist in
 * peace.
 *
 * Normally it looks like the following.
 *
 * {universe prefix}  -> (gw, oif)	[scope link]
 *		  |
 *		  |-> {link prefix} -> (gw, oif)	[scope local]
 *					|
 *					|-> {local prefix} (terminal node)
 */
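/* A concrete (hypothetical) instance of the recursion above: adding
 * "default via 192.0.2.1" triggers a lookup of 192.0.2.1 with scope
 * RT_SCOPE_LINK.  That lookup must hit something like the connected
 * prefix "192.0.2.0/24 dev eth0 scope link", whose own nexthop
 * resolves to a local address and therefore terminates the recursion.
 * With RTNH_F_ONLINK set the lookup is skipped entirely and the
 * gateway only has to be a unicast address behind the given device,
 * which is exactly what the first branch below implements.
 */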
static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
u8 scope, struct netlink_ext_ack *extack)
{
struct net_device *dev;
struct fib_result res;
int err = 0;
if (nh->fib_nh_flags & RTNH_F_ONLINK) {
unsigned int addr_type;
if (scope >= RT_SCOPE_LINK) {
NL_SET_ERR_MSG(extack, "Nexthop has invalid scope");
return -EINVAL;
}
dev = __dev_get_by_index(net, nh->fib_nh_oif);
if (!dev) {
NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
return -ENODEV;
}
if (!(dev->flags & IFF_UP)) {
NL_SET_ERR_MSG(extack, "Nexthop device is not up");
return -ENETDOWN;
}
addr_type = inet_addr_type_dev_table(net, dev, nh->fib_nh_gw4);
if (addr_type != RTN_UNICAST) {
NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
return -EINVAL;
}
if (!netif_carrier_ok(dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
nh->fib_nh_dev = dev;
netdev_hold(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
nh->fib_nh_scope = RT_SCOPE_LINK;
return 0;
}
rcu_read_lock();
{
struct fib_table *tbl = NULL;
struct flowi4 fl4 = {
.daddr = nh->fib_nh_gw4,
.flowi4_scope = scope + 1,
.flowi4_oif = nh->fib_nh_oif,
.flowi4_iif = LOOPBACK_IFINDEX,
};
/* It is not necessary, but requires a bit of thinking */
if (fl4.flowi4_scope < RT_SCOPE_LINK)
fl4.flowi4_scope = RT_SCOPE_LINK;
if (table && table != RT_TABLE_MAIN)
tbl = fib_get_table(net, table);
if (tbl)
err = fib_table_lookup(tbl, &fl4, &res,
FIB_LOOKUP_IGNORE_LINKSTATE |
FIB_LOOKUP_NOREF);
/* on error or if no table given do full lookup. This
* is needed for example when nexthops are in the local
* table rather than the given table
*/
if (!tbl || err) {
err = fib_lookup(net, &fl4, &res,
FIB_LOOKUP_IGNORE_LINKSTATE);
}
if (err) {
NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
goto out;
}
}
err = -EINVAL;
if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
goto out;
}
nh->fib_nh_scope = res.scope;
nh->fib_nh_oif = FIB_RES_OIF(res);
nh->fib_nh_dev = dev = FIB_RES_DEV(res);
if (!dev) {
NL_SET_ERR_MSG(extack,
"No egress device for nexthop gateway");
goto out;
}
netdev_hold(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
if (!netif_carrier_ok(dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
out:
rcu_read_unlock();
return err;
}
static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,
struct netlink_ext_ack *extack)
{
struct in_device *in_dev;
int err;
if (nh->fib_nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
NL_SET_ERR_MSG(extack,
"Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
return -EINVAL;
}
rcu_read_lock();
err = -ENODEV;
in_dev = inetdev_by_index(net, nh->fib_nh_oif);
if (!in_dev)
goto out;
err = -ENETDOWN;
if (!(in_dev->dev->flags & IFF_UP)) {
NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
goto out;
}
nh->fib_nh_dev = in_dev->dev;
netdev_hold(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
nh->fib_nh_scope = RT_SCOPE_HOST;
if (!netif_carrier_ok(nh->fib_nh_dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
err = 0;
out:
rcu_read_unlock();
return err;
}
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
struct netlink_ext_ack *extack)
{
int err;
if (nh->fib_nh_gw_family == AF_INET)
err = fib_check_nh_v4_gw(net, nh, table, scope, extack);
else if (nh->fib_nh_gw_family == AF_INET6)
err = fib_check_nh_v6_gw(net, nh, table, extack);
else
err = fib_check_nh_nongw(net, nh, extack);
return err;
}
static struct hlist_head *
fib_info_laddrhash_bucket(const struct net *net, __be32 val)
{
u32 slot = hash_32(net_hash_mix(net) ^ (__force u32)val,
fib_info_hash_bits);
return &fib_info_laddrhash[slot];
}
static void fib_info_hash_move(struct hlist_head *new_info_hash,
struct hlist_head *new_laddrhash,
unsigned int new_size)
{
struct hlist_head *old_info_hash, *old_laddrhash;
unsigned int old_size = fib_info_hash_size;
unsigned int i;
spin_lock_bh(&fib_info_lock);
old_info_hash = fib_info_hash;
old_laddrhash = fib_info_laddrhash;
fib_info_hash_size = new_size;
fib_info_hash_bits = ilog2(new_size);
for (i = 0; i < old_size; i++) {
struct hlist_head *head = &fib_info_hash[i];
struct hlist_node *n;
struct fib_info *fi;
hlist_for_each_entry_safe(fi, n, head, fib_hash) {
struct hlist_head *dest;
unsigned int new_hash;
new_hash = fib_info_hashfn(fi);
dest = &new_info_hash[new_hash];
hlist_add_head(&fi->fib_hash, dest);
}
}
fib_info_hash = new_info_hash;
fib_info_laddrhash = new_laddrhash;
for (i = 0; i < old_size; i++) {
struct hlist_head *lhead = &old_laddrhash[i];
struct hlist_node *n;
struct fib_info *fi;
hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
struct hlist_head *ldest;
ldest = fib_info_laddrhash_bucket(fi->fib_net,
fi->fib_prefsrc);
hlist_add_head(&fi->fib_lhash, ldest);
}
}
spin_unlock_bh(&fib_info_lock);
kvfree(old_info_hash);
kvfree(old_laddrhash);
}
__be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
unsigned char scope)
{
struct fib_nh *nh;
if (nhc->nhc_family != AF_INET)
return inet_select_addr(nhc->nhc_dev, 0, scope);
nh = container_of(nhc, struct fib_nh, nh_common);
nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
return nh->nh_saddr;
}
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
{
struct fib_nh_common *nhc = res->nhc;
if (res->fi->fib_prefsrc)
return res->fi->fib_prefsrc;
if (nhc->nhc_family == AF_INET) {
struct fib_nh *nh;
nh = container_of(nhc, struct fib_nh, nh_common);
if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
return nh->nh_saddr;
}
return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
}
static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
fib_prefsrc != cfg->fc_dst) {
u32 tb_id = cfg->fc_table;
int rc;
if (tb_id == RT_TABLE_MAIN)
tb_id = RT_TABLE_LOCAL;
rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
fib_prefsrc, tb_id);
if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
fib_prefsrc, RT_TABLE_LOCAL);
}
if (rc != RTN_LOCAL)
return false;
}
return true;
}
struct fib_info *fib_create_info(struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
int err;
struct fib_info *fi = NULL;
struct nexthop *nh = NULL;
struct fib_info *ofi;
int nhs = 1;
struct net *net = cfg->fc_nlinfo.nl_net;
if (cfg->fc_type > RTN_MAX)
goto err_inval;
/* Fast check to catch the most weird cases */
if (fib_props[cfg->fc_type].scope > cfg->fc_scope) {
NL_SET_ERR_MSG(extack, "Invalid scope");
goto err_inval;
}
if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
NL_SET_ERR_MSG(extack,
"Invalid rtm_flags - can not contain DEAD or LINKDOWN");
goto err_inval;
}
if (cfg->fc_nh_id) {
if (!cfg->fc_mx) {
fi = fib_find_info_nh(net, cfg);
if (fi) {
refcount_inc(&fi->fib_treeref);
return fi;
}
}
nh = nexthop_find_by_id(net, cfg->fc_nh_id);
if (!nh) {
NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
goto err_inval;
}
nhs = 0;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (cfg->fc_mp) {
nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len, extack);
if (nhs == 0)
goto err_inval;
}
#endif
err = -ENOBUFS;
/* Paired with WRITE_ONCE() in fib_release_info() */
if (READ_ONCE(fib_info_cnt) >= fib_info_hash_size) {
unsigned int new_size = fib_info_hash_size << 1;
struct hlist_head *new_info_hash;
struct hlist_head *new_laddrhash;
size_t bytes;
if (!new_size)
new_size = 16;
bytes = (size_t)new_size * sizeof(struct hlist_head *);
new_info_hash = kvzalloc(bytes, GFP_KERNEL);
new_laddrhash = kvzalloc(bytes, GFP_KERNEL);
if (!new_info_hash || !new_laddrhash) {
kvfree(new_info_hash);
kvfree(new_laddrhash);
} else {
fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
}
if (!fib_info_hash_size)
goto failure;
}
fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
if (!fi)
goto failure;
fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
cfg->fc_mx_len, extack);
if (IS_ERR(fi->fib_metrics)) {
err = PTR_ERR(fi->fib_metrics);
kfree(fi);
return ERR_PTR(err);
}
fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
fi->fib_scope = cfg->fc_scope;
fi->fib_flags = cfg->fc_flags;
fi->fib_priority = cfg->fc_priority;
fi->fib_prefsrc = cfg->fc_prefsrc;
fi->fib_type = cfg->fc_type;
fi->fib_tb_id = cfg->fc_table;
fi->fib_nhs = nhs;
if (nh) {
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
err = -EINVAL;
} else {
err = 0;
fi->nh = nh;
}
} else {
change_nexthops(fi) {
nexthop_nh->nh_parent = fi;
} endfor_nexthops(fi)
if (cfg->fc_mp)
err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg,
extack);
else
err = fib_nh_init(net, fi->fib_nh, cfg, 1, extack);
}
if (err != 0)
goto failure;
if (fib_props[cfg->fc_type].error) {
if (cfg->fc_gw_family || cfg->fc_oif || cfg->fc_mp) {
NL_SET_ERR_MSG(extack,
"Gateway, device and multipath can not be specified for this route type");
goto err_inval;
}
goto link_it;
} else {
switch (cfg->fc_type) {
case RTN_UNICAST:
case RTN_LOCAL:
case RTN_BROADCAST:
case RTN_ANYCAST:
case RTN_MULTICAST:
break;
default:
NL_SET_ERR_MSG(extack, "Invalid route type");
goto err_inval;
}
}
if (cfg->fc_scope > RT_SCOPE_HOST) {
NL_SET_ERR_MSG(extack, "Invalid scope");
goto err_inval;
}
if (fi->nh) {
err = fib_check_nexthop(fi->nh, cfg->fc_scope, extack);
if (err)
goto failure;
} else if (cfg->fc_scope == RT_SCOPE_HOST) {
struct fib_nh *nh = fi->fib_nh;
/* Local address is added. */
if (nhs != 1) {
NL_SET_ERR_MSG(extack,
"Route with host scope can not have multiple nexthops");
goto err_inval;
}
if (nh->fib_nh_gw_family) {
NL_SET_ERR_MSG(extack,
"Route with host scope can not have a gateway");
goto err_inval;
}
nh->fib_nh_scope = RT_SCOPE_NOWHERE;
nh->fib_nh_dev = dev_get_by_index(net, nh->fib_nh_oif);
err = -ENODEV;
if (!nh->fib_nh_dev)
goto failure;
netdev_tracker_alloc(nh->fib_nh_dev, &nh->fib_nh_dev_tracker,
GFP_KERNEL);
} else {
int linkdown = 0;
change_nexthops(fi) {
err = fib_check_nh(cfg->fc_nlinfo.nl_net, nexthop_nh,
cfg->fc_table, cfg->fc_scope,
extack);
if (err != 0)
goto failure;
if (nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN)
linkdown++;
} endfor_nexthops(fi)
if (linkdown == fi->fib_nhs)
fi->fib_flags |= RTNH_F_LINKDOWN;
}
if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) {
NL_SET_ERR_MSG(extack, "Invalid prefsrc address");
goto err_inval;
}
if (!fi->nh) {
change_nexthops(fi) {
fib_info_update_nhc_saddr(net, &nexthop_nh->nh_common,
fi->fib_scope);
if (nexthop_nh->fib_nh_gw_family == AF_INET6)
fi->fib_nh_is_v6 = true;
} endfor_nexthops(fi)
fib_rebalance(fi);
}
link_it:
ofi = fib_find_info(fi);
if (ofi) {
/* fib_table_lookup() should not see @fi yet. */
fi->fib_dead = 1;
free_fib_info(fi);
refcount_inc(&ofi->fib_treeref);
return ofi;
}
refcount_set(&fi->fib_treeref, 1);
refcount_set(&fi->fib_clntref, 1);
spin_lock_bh(&fib_info_lock);
fib_info_cnt++;
hlist_add_head(&fi->fib_hash,
&fib_info_hash[fib_info_hashfn(fi)]);
if (fi->fib_prefsrc) {
struct hlist_head *head;
head = fib_info_laddrhash_bucket(net, fi->fib_prefsrc);
hlist_add_head(&fi->fib_lhash, head);
}
if (fi->nh) {
list_add(&fi->nh_list, &nh->fi_list);
} else {
change_nexthops(fi) {
struct hlist_head *head;
if (!nexthop_nh->fib_nh_dev)
continue;
head = fib_info_devhash_bucket(nexthop_nh->fib_nh_dev);
hlist_add_head(&nexthop_nh->nh_hash, head);
} endfor_nexthops(fi)
}
spin_unlock_bh(&fib_info_lock);
return fi;
err_inval:
err = -EINVAL;
failure:
if (fi) {
/* fib_table_lookup() should not see @fi yet. */
fi->fib_dead = 1;
free_fib_info(fi);
}
return ERR_PTR(err);
}
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
u8 rt_family, unsigned char *flags, bool skip_oif)
{
if (nhc->nhc_flags & RTNH_F_DEAD)
*flags |= RTNH_F_DEAD;
if (nhc->nhc_flags & RTNH_F_LINKDOWN) {
*flags |= RTNH_F_LINKDOWN;
rcu_read_lock();
switch (nhc->nhc_family) {
case AF_INET:
if (ip_ignore_linkdown(nhc->nhc_dev))
*flags |= RTNH_F_DEAD;
break;
case AF_INET6:
if (ip6_ignore_linkdown(nhc->nhc_dev))
*flags |= RTNH_F_DEAD;
break;
}
rcu_read_unlock();
}
switch (nhc->nhc_gw_family) {
case AF_INET:
if (nla_put_in_addr(skb, RTA_GATEWAY, nhc->nhc_gw.ipv4))
goto nla_put_failure;
break;
case AF_INET6:
/* if gateway family does not match nexthop family
* gateway is encoded as RTA_VIA
*/
if (rt_family != nhc->nhc_gw_family) {
int alen = sizeof(struct in6_addr);
struct nlattr *nla;
struct rtvia *via;
nla = nla_reserve(skb, RTA_VIA, alen + 2);
if (!nla)
goto nla_put_failure;
via = nla_data(nla);
via->rtvia_family = AF_INET6;
memcpy(via->rtvia_addr, &nhc->nhc_gw.ipv6, alen);
} else if (nla_put_in6_addr(skb, RTA_GATEWAY,
&nhc->nhc_gw.ipv6) < 0) {
goto nla_put_failure;
}
break;
}
*flags |= (nhc->nhc_flags &
(RTNH_F_ONLINK | RTNH_F_OFFLOAD | RTNH_F_TRAP));
if (!skip_oif && nhc->nhc_dev &&
nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex))
goto nla_put_failure;
if (nhc->nhc_lwtstate &&
lwtunnel_fill_encap(skb, nhc->nhc_lwtstate,
RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(fib_nexthop_info);
#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
int nh_weight, u8 rt_family, u32 nh_tclassid)
{
const struct net_device *dev = nhc->nhc_dev;
struct rtnexthop *rtnh;
unsigned char flags = 0;
rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
if (!rtnh)
goto nla_put_failure;
rtnh->rtnh_hops = nh_weight - 1;
rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0)
goto nla_put_failure;
rtnh->rtnh_flags = flags;
if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid))
goto nla_put_failure;
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(fib_add_nexthop);
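/* For orientation, each entry emitted above occupies the following
 * space inside RTA_MULTIPATH (a sketch of the existing uapi layout,
 * not a new definition):
 *
 *	struct rtnexthop {
 *		unsigned short	rtnh_len;	(header + nested attrs)
 *		unsigned char	rtnh_flags;	(RTNH_F_* flags)
 *		unsigned char	rtnh_hops;	(weight - 1)
 *		int		rtnh_ifindex;	(egress device)
 *	};
 *
 * followed by RTA_GATEWAY or RTA_VIA, an optional RTA_FLOW and an
 * optional RTA_ENCAP/RTA_ENCAP_TYPE pair, with rtnh_len patched last
 * so that it covers the header plus those nested attributes.
 */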
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
{
struct nlattr *mp;
mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
if (!mp)
goto nla_put_failure;
if (unlikely(fi->nh)) {
if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0)
goto nla_put_failure;
goto mp_end;
}
for_nexthops(fi) {
u32 nh_tclassid = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
nh_tclassid = nh->nh_tclassid;
#endif
if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
AF_INET, nh_tclassid) < 0)
goto nla_put_failure;
} endfor_nexthops(fi);
mp_end:
nla_nest_end(skb, mp);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
#else
static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
{
return 0;
}
#endif
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
const struct fib_rt_info *fri, unsigned int flags)
{
unsigned int nhs = fib_info_num_path(fri->fi);
struct fib_info *fi = fri->fi;
u32 tb_id = fri->tb_id;
struct nlmsghdr *nlh;
struct rtmsg *rtm;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
if (!nlh)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_INET;
rtm->rtm_dst_len = fri->dst_len;
rtm->rtm_src_len = 0;
rtm->rtm_tos = inet_dscp_to_dsfield(fri->dscp);
if (tb_id < 256)
rtm->rtm_table = tb_id;
else
rtm->rtm_table = RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, tb_id))
goto nla_put_failure;
rtm->rtm_type = fri->type;
rtm->rtm_flags = fi->fib_flags;
rtm->rtm_scope = fi->fib_scope;
rtm->rtm_protocol = fi->fib_protocol;
if (rtm->rtm_dst_len &&
nla_put_in_addr(skb, RTA_DST, fri->dst))
goto nla_put_failure;
if (fi->fib_priority &&
nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
goto nla_put_failure;
if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
goto nla_put_failure;
if (fi->fib_prefsrc &&
nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
goto nla_put_failure;
if (fi->nh) {
if (nla_put_u32(skb, RTA_NH_ID, fi->nh->id))
goto nla_put_failure;
if (nexthop_is_blackhole(fi->nh))
rtm->rtm_type = RTN_BLACKHOLE;
if (!READ_ONCE(fi->fib_net->ipv4.sysctl_nexthop_compat_mode))
goto offload;
}
if (nhs == 1) {
const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
unsigned char flags = 0;
if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0)
goto nla_put_failure;
rtm->rtm_flags = flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (nhc->nhc_family == AF_INET) {
struct fib_nh *nh;
nh = container_of(nhc, struct fib_nh, nh_common);
if (nh->nh_tclassid &&
nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
goto nla_put_failure;
}
#endif
} else {
if (fib_add_multipath(skb, fi) < 0)
goto nla_put_failure;
}
offload:
if (fri->offload)
rtm->rtm_flags |= RTM_F_OFFLOAD;
if (fri->trap)
rtm->rtm_flags |= RTM_F_TRAP;
if (fri->offload_failed)
rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
/*
* Update FIB if:
* - local address disappeared -> we must delete all the entries
* referring to it.
 * - device went down -> we must shut down all nexthops going via it.
*/
int fib_sync_down_addr(struct net_device *dev, __be32 local)
{
int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
struct net *net = dev_net(dev);
struct hlist_head *head;
struct fib_info *fi;
int ret = 0;
if (!fib_info_laddrhash || local == 0)
return 0;
head = fib_info_laddrhash_bucket(net, local);
hlist_for_each_entry(fi, head, fib_lhash) {
if (!net_eq(fi->fib_net, net) ||
fi->fib_tb_id != tb_id)
continue;
if (fi->fib_prefsrc == local) {
fi->fib_flags |= RTNH_F_DEAD;
ret++;
}
}
return ret;
}
static int call_fib_nh_notifiers(struct fib_nh *nh,
enum fib_event_type event_type)
{
bool ignore_link_down = ip_ignore_linkdown(nh->fib_nh_dev);
struct fib_nh_notifier_info info = {
.fib_nh = nh,
};
switch (event_type) {
case FIB_EVENT_NH_ADD:
if (nh->fib_nh_flags & RTNH_F_DEAD)
break;
if (ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN)
break;
return call_fib4_notifiers(dev_net(nh->fib_nh_dev), event_type,
&info.info);
case FIB_EVENT_NH_DEL:
if ((ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN) ||
(nh->fib_nh_flags & RTNH_F_DEAD))
return call_fib4_notifiers(dev_net(nh->fib_nh_dev),
event_type, &info.info);
break;
default:
break;
}
return NOTIFY_DONE;
}
/* Update the PMTU of exceptions when:
* - the new MTU of the first hop becomes smaller than the PMTU
* - the old MTU was the same as the PMTU, and it limited discovery of
* larger MTUs on the path. With that limit raised, we can now
* discover larger MTUs
* A special case is locked exceptions, for which the PMTU is smaller
* than the minimal accepted PMTU:
* - if the new MTU is greater than the PMTU, don't make any change
* - otherwise, unlock and set PMTU
*/
void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig)
{
struct fnhe_hash_bucket *bucket;
int i;
bucket = rcu_dereference_protected(nhc->nhc_exceptions, 1);
if (!bucket)
return;
for (i = 0; i < FNHE_HASH_SIZE; i++) {
struct fib_nh_exception *fnhe;
for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
fnhe;
fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
if (fnhe->fnhe_mtu_locked) {
if (new <= fnhe->fnhe_pmtu) {
fnhe->fnhe_pmtu = new;
fnhe->fnhe_mtu_locked = false;
}
} else if (new < fnhe->fnhe_pmtu ||
orig == fnhe->fnhe_pmtu) {
fnhe->fnhe_pmtu = new;
}
}
}
}
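/* Two worked (hypothetical) cases for the rules above: a locked
 * exception with fnhe_pmtu == 552 is left alone when the first hop
 * grows to 1500, but a shrink to 500 both lowers the PMTU to 500 and
 * unlocks it.  An unlocked exception with fnhe_pmtu == 1400 is lowered
 * whenever the new first-hop MTU drops below 1400, and is raised to
 * the new MTU when the old MTU was itself the 1400 limit
 * (orig == fnhe_pmtu), since the device was what capped discovery.
 */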
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
struct hlist_head *head = fib_info_devhash_bucket(dev);
struct fib_nh *nh;
hlist_for_each_entry(nh, head, nh_hash) {
if (nh->fib_nh_dev == dev)
fib_nhc_update_mtu(&nh->nh_common, dev->mtu, orig_mtu);
}
}
/* Event force Flags Description
* NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
* NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
* NETDEV_DOWN 1 LINKDOWN|DEAD Last address removed
* NETDEV_UNREGISTER 1 LINKDOWN|DEAD Device removed
*
* only used when fib_nh is built into fib_info
*/
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
struct hlist_head *head = fib_info_devhash_bucket(dev);
struct fib_info *prev_fi = NULL;
int scope = RT_SCOPE_NOWHERE;
struct fib_nh *nh;
int ret = 0;
if (force)
scope = -1;
hlist_for_each_entry(nh, head, nh_hash) {
struct fib_info *fi = nh->nh_parent;
int dead;
BUG_ON(!fi->fib_nhs);
if (nh->fib_nh_dev != dev || fi == prev_fi)
continue;
prev_fi = fi;
dead = 0;
change_nexthops(fi) {
if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD)
dead++;
else if (nexthop_nh->fib_nh_dev == dev &&
nexthop_nh->fib_nh_scope != scope) {
switch (event) {
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
nexthop_nh->fib_nh_flags |= RTNH_F_DEAD;
fallthrough;
case NETDEV_CHANGE:
nexthop_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
break;
}
call_fib_nh_notifiers(nexthop_nh,
FIB_EVENT_NH_DEL);
dead++;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (event == NETDEV_UNREGISTER &&
nexthop_nh->fib_nh_dev == dev) {
dead = fi->fib_nhs;
break;
}
#endif
} endfor_nexthops(fi)
if (dead == fi->fib_nhs) {
switch (event) {
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
fi->fib_flags |= RTNH_F_DEAD;
fallthrough;
case NETDEV_CHANGE:
fi->fib_flags |= RTNH_F_LINKDOWN;
break;
}
ret++;
}
fib_rebalance(fi);
}
return ret;
}
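/* A small (hypothetical) walk-through of the accounting above: take a
 * two-nexthop fib_info in which only nexthop 0 uses the device that
 * just went NETDEV_DOWN.  Nexthop 0 gets RTNH_F_DEAD | RTNH_F_LINKDOWN
 * and a FIB_EVENT_NH_DEL notification, dead becomes 1, which is less
 * than fib_nhs == 2, so the route itself stays usable and is merely
 * rebalanced onto nexthop 1.  Only when every nexthop is dead (or the
 * device is being unregistered, which kills the whole multipath
 * fib_info at once) does the fib_info inherit RTNH_F_DEAD and count
 * towards the returned total.
 */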
/* Must be invoked inside an RCU-protected region. */
static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
struct fib_info *fi = NULL, *last_resort = NULL;
struct hlist_head *fa_head = res->fa_head;
struct fib_table *tb = res->table;
u8 slen = 32 - res->prefixlen;
int order = -1, last_idx = -1;
struct fib_alias *fa, *fa1 = NULL;
u32 last_prio = res->fi->fib_priority;
dscp_t last_dscp = 0;
hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
struct fib_info *next_fi = fa->fa_info;
struct fib_nh_common *nhc;
if (fa->fa_slen != slen)
continue;
if (fa->fa_dscp &&
fa->fa_dscp != inet_dsfield_to_dscp(flp->flowi4_tos))
continue;
if (fa->tb_id != tb->tb_id)
continue;
if (next_fi->fib_priority > last_prio &&
fa->fa_dscp == last_dscp) {
if (last_dscp)
continue;
break;
}
if (next_fi->fib_flags & RTNH_F_DEAD)
continue;
last_dscp = fa->fa_dscp;
last_prio = next_fi->fib_priority;
if (next_fi->fib_scope != res->scope ||
fa->fa_type != RTN_UNICAST)
continue;
nhc = fib_info_nhc(next_fi, 0);
if (!nhc->nhc_gw_family || nhc->nhc_scope != RT_SCOPE_LINK)
continue;
fib_alias_accessed(fa);
if (!fi) {
if (next_fi != res->fi)
break;
fa1 = fa;
} else if (!fib_detect_death(fi, order, &last_resort,
&last_idx, fa1->fa_default)) {
fib_result_assign(res, fi);
fa1->fa_default = order;
goto out;
}
fi = next_fi;
order++;
}
if (order <= 0 || !fi) {
if (fa1)
fa1->fa_default = -1;
goto out;
}
if (!fib_detect_death(fi, order, &last_resort, &last_idx,
fa1->fa_default)) {
fib_result_assign(res, fi);
fa1->fa_default = order;
goto out;
}
if (last_idx >= 0)
fib_result_assign(res, last_resort);
fa1->fa_default = last_idx;
out:
return;
}
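/* An illustrative run of the loop above (the routes are hypothetical):
 * several equal-priority default routes share the prefix and res->fi
 * currently points at the first of them.  The first matching alias is
 * only remembered as fa1; for each later candidate fib_detect_death()
 * decides whether the previously scanned gateway still looks alive,
 * using fa1->fa_default so that the currently preferred default must
 * be NUD_REACHABLE while the others merely need a valid neighbour
 * entry.  The first survivor is assigned to the result and its order
 * is cached in fa1->fa_default; if nothing survives, the remembered
 * last resort (if any) is used instead.
 */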
/*
 * A dead device comes back up. We wake up the dead nexthops.
 * This makes sense only for multipath routes.
*
* only used when fib_nh is built into fib_info
*/
int fib_sync_up(struct net_device *dev, unsigned char nh_flags)
{
struct fib_info *prev_fi;
struct hlist_head *head;
struct fib_nh *nh;
int ret;
if (!(dev->flags & IFF_UP))
return 0;
if (nh_flags & RTNH_F_DEAD) {
unsigned int flags = dev_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
nh_flags |= RTNH_F_LINKDOWN;
}
prev_fi = NULL;
head = fib_info_devhash_bucket(dev);
ret = 0;
hlist_for_each_entry(nh, head, nh_hash) {
struct fib_info *fi = nh->nh_parent;
int alive;
BUG_ON(!fi->fib_nhs);
if (nh->fib_nh_dev != dev || fi == prev_fi)
continue;
prev_fi = fi;
alive = 0;
change_nexthops(fi) {
if (!(nexthop_nh->fib_nh_flags & nh_flags)) {
alive++;
continue;
}
if (!nexthop_nh->fib_nh_dev ||
!(nexthop_nh->fib_nh_dev->flags & IFF_UP))
continue;
if (nexthop_nh->fib_nh_dev != dev ||
!__in_dev_get_rtnl(dev))
continue;
alive++;
nexthop_nh->fib_nh_flags &= ~nh_flags;
call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD);
} endfor_nexthops(fi)
if (alive > 0) {
fi->fib_flags &= ~nh_flags;
ret++;
}
fib_rebalance(fi);
}
return ret;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static bool fib_good_nh(const struct fib_nh *nh)
{
int state = NUD_REACHABLE;
if (nh->fib_nh_scope == RT_SCOPE_LINK) {
struct neighbour *n;
rcu_read_lock();
if (likely(nh->fib_nh_gw_family == AF_INET))
n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
(__force u32)nh->fib_nh_gw4);
else if (nh->fib_nh_gw_family == AF_INET6)
n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev,
&nh->fib_nh_gw6);
else
n = NULL;
if (n)
state = READ_ONCE(n->nud_state);
rcu_read_unlock();
}
return !!(state & NUD_VALID);
}
void fib_select_multipath(struct fib_result *res, int hash)
{
struct fib_info *fi = res->fi;
struct net *net = fi->fib_net;
bool first = false;
if (unlikely(res->fi->nh)) {
nexthop_path_fib_result(res, hash);
return;
}
change_nexthops(fi) {
if (READ_ONCE(net->ipv4.sysctl_fib_multipath_use_neigh)) {
if (!fib_good_nh(nexthop_nh))
continue;
if (!first) {
res->nh_sel = nhsel;
res->nhc = &nexthop_nh->nh_common;
first = true;
}
}
if (hash > atomic_read(&nexthop_nh->fib_nh_upper_bound))
continue;
res->nh_sel = nhsel;
res->nhc = &nexthop_nh->nh_common;
return;
} endfor_nexthops(fi);
}
#endif
void fib_select_path(struct net *net, struct fib_result *res,
struct flowi4 *fl4, const struct sk_buff *skb)
{
if (fl4->flowi4_oif)
goto check_saddr;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (fib_info_num_path(res->fi) > 1) {
int h = fib_multipath_hash(net, fl4, skb, NULL);
fib_select_multipath(res, h);
}
else
#endif
if (!res->prefixlen &&
res->table->tb_num_default > 1 &&
res->type == RTN_UNICAST)
fib_select_default(fl4, res);
check_saddr:
if (!fl4->saddr)
fl4->saddr = fib_result_prefsrc(net, res);
}
| linux-master | net/ipv4/fib_semantics.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Pluggable TCP congestion control support and newReno
* congestion control.
* Based on ideas from I/O scheduler support and Web100.
*
* Copyright (C) 2005 Stephen Hemminger <[email protected]>
*/
#define pr_fmt(fmt) "TCP: " fmt
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>
#include <trace/events/tcp.h>
static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);
/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
struct tcp_congestion_ops *e;
list_for_each_entry_rcu(e, &tcp_cong_list, list) {
if (strcmp(e->name, name) == 0)
return e;
}
return NULL;
}
void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
struct inet_connection_sock *icsk = inet_csk(sk);
trace_tcp_cong_state_set(sk, ca_state);
if (icsk->icsk_ca_ops->set_state)
icsk->icsk_ca_ops->set_state(sk, ca_state);
icsk->icsk_ca_state = ca_state;
}
/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
const char *name)
{
struct tcp_congestion_ops *ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
if (!ca && capable(CAP_NET_ADMIN)) {
rcu_read_unlock();
request_module("tcp_%s", name);
rcu_read_lock();
ca = tcp_ca_find(name);
}
#endif
return ca;
}
/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
struct tcp_congestion_ops *e;
list_for_each_entry_rcu(e, &tcp_cong_list, list) {
if (e->key == key)
return e;
}
return NULL;
}
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca)
{
/* all algorithms must implement these */
if (!ca->ssthresh || !ca->undo_cwnd ||
!(ca->cong_avoid || ca->cong_control)) {
pr_err("%s does not implement required ops\n", ca->name);
return -EINVAL;
}
return 0;
}
/* Attach new congestion control algorithm to the list
* of available options.
*/
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
int ret;
ret = tcp_validate_congestion_control(ca);
if (ret)
return ret;
ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
spin_lock(&tcp_cong_list_lock);
if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
pr_notice("%s already registered or non-unique key\n",
ca->name);
ret = -EEXIST;
} else {
list_add_tail_rcu(&ca->list, &tcp_cong_list);
pr_debug("%s registered\n", ca->name);
}
spin_unlock(&tcp_cong_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
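/* A minimal (hypothetical) user of this interface, sketched here only
 * to show which ops the validation above insists on; a real algorithm
 * would live in its own module:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *
 *	module_init(tcp_example_register);
 *	module_exit(tcp_example_unregister);
 */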
/*
* Remove congestion control algorithm, called from
* the module's remove function. Module ref counts are used
* to ensure that this can't be done till all sockets using
* that method are closed.
*/
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
spin_lock(&tcp_cong_list_lock);
list_del_rcu(&ca->list);
spin_unlock(&tcp_cong_list_lock);
/* Wait for outstanding readers to complete before the
* module gets removed entirely.
*
 * A try_module_get() should fail by now as our module is
 * in the "going" state: no refs are held anymore and the
 * module_exit() handler is being called.
*/
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
/* Replace a registered old ca with a new one.
*
 * The new ca must have the same name as the old, already
 * registered one.
*/
int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_congestion_ops *old_ca)
{
struct tcp_congestion_ops *existing;
int ret;
ret = tcp_validate_congestion_control(ca);
if (ret)
return ret;
ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
spin_lock(&tcp_cong_list_lock);
existing = tcp_ca_find_key(old_ca->key);
if (ca->key == TCP_CA_UNSPEC || !existing || strcmp(existing->name, ca->name)) {
pr_notice("%s not registered or non-unique key\n",
ca->name);
ret = -EINVAL;
} else if (existing != old_ca) {
pr_notice("invalid old congestion control algorithm to replace\n");
ret = -EINVAL;
} else {
/* Add the new one before removing the old one to keep
* one implementation available all the time.
*/
list_add_tail_rcu(&ca->list, &tcp_cong_list);
list_del_rcu(&existing->list);
pr_debug("%s updated\n", ca->name);
}
spin_unlock(&tcp_cong_list_lock);
/* Wait for outstanding readers to complete before the
* module or struct_ops gets removed entirely.
*/
if (!ret)
synchronize_rcu();
return ret;
}
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
const struct tcp_congestion_ops *ca;
u32 key = TCP_CA_UNSPEC;
might_sleep();
rcu_read_lock();
ca = tcp_ca_find_autoload(net, name);
if (ca) {
key = ca->key;
*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
}
rcu_read_unlock();
return key;
}
char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
const struct tcp_congestion_ops *ca;
char *ret = NULL;
rcu_read_lock();
ca = tcp_ca_find_key(key);
if (ca)
ret = strncpy(buffer, ca->name,
TCP_CA_NAME_MAX);
rcu_read_unlock();
return ret;
}
/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
struct net *net = sock_net(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_congestion_ops *ca;
rcu_read_lock();
ca = rcu_dereference(net->ipv4.tcp_congestion_control);
if (unlikely(!bpf_try_module_get(ca, ca->owner)))
ca = &tcp_reno;
icsk->icsk_ca_ops = ca;
rcu_read_unlock();
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
if (ca->flags & TCP_CONG_NEEDS_ECN)
INET_ECN_xmit(sk);
else
INET_ECN_dontxmit(sk);
}
void tcp_init_congestion_control(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_sk(sk)->prior_ssthresh = 0;
if (icsk->icsk_ca_ops->init)
icsk->icsk_ca_ops->init(sk);
if (tcp_ca_needs_ecn(sk))
INET_ECN_xmit(sk);
else
INET_ECN_dontxmit(sk);
icsk->icsk_ca_initialized = 1;
}
static void tcp_reinit_congestion_control(struct sock *sk,
const struct tcp_congestion_ops *ca)
{
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_cleanup_congestion_control(sk);
icsk->icsk_ca_ops = ca;
icsk->icsk_ca_setsockopt = 1;
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
if (ca->flags & TCP_CONG_NEEDS_ECN)
INET_ECN_xmit(sk);
else
INET_ECN_dontxmit(sk);
if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
tcp_init_congestion_control(sk);
}
/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ca_ops->release)
icsk->icsk_ca_ops->release(sk);
bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}
/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
struct tcp_congestion_ops *ca;
const struct tcp_congestion_ops *prev;
int ret;
rcu_read_lock();
ca = tcp_ca_find_autoload(net, name);
if (!ca) {
ret = -ENOENT;
} else if (!bpf_try_module_get(ca, ca->owner)) {
ret = -EBUSY;
} else if (!net_eq(net, &init_net) &&
!(ca->flags & TCP_CONG_NON_RESTRICTED)) {
/* Only init netns can set default to a restricted algorithm */
ret = -EPERM;
} else {
prev = xchg(&net->ipv4.tcp_congestion_control, ca);
if (prev)
bpf_module_put(prev, prev->owner);
ca->flags |= TCP_CONG_NON_RESTRICTED;
ret = 0;
}
rcu_read_unlock();
return ret;
}
/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
return tcp_set_default_congestion_control(&init_net,
CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);
/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
struct tcp_congestion_ops *ca;
size_t offs = 0;
rcu_read_lock();
list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
offs += snprintf(buf + offs, maxlen - offs,
"%s%s",
offs == 0 ? "" : " ", ca->name);
if (WARN_ON_ONCE(offs >= maxlen))
break;
}
rcu_read_unlock();
}
/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
const struct tcp_congestion_ops *ca;
rcu_read_lock();
ca = rcu_dereference(net->ipv4.tcp_congestion_control);
strncpy(name, ca->name, TCP_CA_NAME_MAX);
rcu_read_unlock();
}
/* Build a list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
struct tcp_congestion_ops *ca;
size_t offs = 0;
*buf = '\0';
rcu_read_lock();
list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
continue;
offs += snprintf(buf + offs, maxlen - offs,
"%s%s",
offs == 0 ? "" : " ", ca->name);
if (WARN_ON_ONCE(offs >= maxlen))
break;
}
rcu_read_unlock();
}
/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
struct tcp_congestion_ops *ca;
char *saved_clone, *clone, *name;
int ret = 0;
saved_clone = clone = kstrdup(val, GFP_USER);
if (!clone)
return -ENOMEM;
spin_lock(&tcp_cong_list_lock);
/* pass 1 check for bad entries */
while ((name = strsep(&clone, " ")) && *name) {
ca = tcp_ca_find(name);
if (!ca) {
ret = -ENOENT;
goto out;
}
}
/* pass 2 clear old values */
list_for_each_entry_rcu(ca, &tcp_cong_list, list)
ca->flags &= ~TCP_CONG_NON_RESTRICTED;
/* pass 3 mark as allowed */
while ((name = strsep(&val, " ")) && *name) {
ca = tcp_ca_find(name);
WARN_ON(!ca);
if (ca)
ca->flags |= TCP_CONG_NON_RESTRICTED;
}
out:
spin_unlock(&tcp_cong_list_lock);
kfree(saved_clone);
return ret;
}
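/* This function backs the net.ipv4.tcp_allowed_congestion_control
 * sysctl, so an illustrative way to exercise it from user space is
 *
 *	echo "reno cubic" > /proc/sys/net/ipv4/tcp_allowed_congestion_control
 *
 * after which only the listed algorithms may be selected by callers of
 * setsockopt(TCP_CONGESTION) that lack CAP_NET_ADMIN; see the
 * cap_net_admin check in tcp_set_congestion_control() below.
 */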
/* Change the congestion control of a socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
*/
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
bool cap_net_admin)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_congestion_ops *ca;
int err = 0;
if (icsk->icsk_ca_dst_locked)
return -EPERM;
rcu_read_lock();
if (!load)
ca = tcp_ca_find(name);
else
ca = tcp_ca_find_autoload(sock_net(sk), name);
	/* No change if asking for the existing value */
if (ca == icsk->icsk_ca_ops) {
icsk->icsk_ca_setsockopt = 1;
goto out;
}
if (!ca)
err = -ENOENT;
else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
err = -EPERM;
else if (!bpf_try_module_get(ca, ca->owner))
err = -EBUSY;
else
tcp_reinit_congestion_control(sk, ca);
out:
rcu_read_unlock();
return err;
}
/* Slow start is used when the congestion window is no greater than the slow
 * start threshold. We follow RFC 2581 and also handle stretch ACKs properly.
 * We do not implement RFC 3465 Appropriate Byte Counting (ABC) per se but
 * something better ;) a packet is only considered (s)acked in its entirety to
 * defend against the ACK attacks described in the RFC. Slow start processes a
 * stretch ACK of degree N as if N acks of degree 1 are received back to back,
 * except that ABC caps N to 2. Slow start exits when cwnd grows over ssthresh
 * and returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
__bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);
acked -= cwnd - tcp_snd_cwnd(tp);
tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));
return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
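/* Worked example of the clamping above (numbers are hypothetical):
 * with snd_cwnd == 10, snd_ssthresh == 16 and a stretch ACK covering
 * 8 packets, cwnd becomes min(10 + 8, 16) == 16, the window grows by
 * 6 segments, and the remaining 2 acked packets are returned so the
 * caller can feed them into congestion avoidance (e.g.
 * tcp_cong_avoid_ai() below) now that slow start has been left.
 */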
/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
* for every packet that was ACKed.
*/
__bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
/* If credits accumulated at a higher w, apply them gently now. */
if (tp->snd_cwnd_cnt >= w) {
tp->snd_cwnd_cnt = 0;
tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
}
tp->snd_cwnd_cnt += acked;
if (tp->snd_cwnd_cnt >= w) {
u32 delta = tp->snd_cwnd_cnt / w;
tp->snd_cwnd_cnt -= delta * w;
tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
}
tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
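/* Worked example (hypothetical numbers): with w == snd_cwnd == 10 and
 * one acked packet per ACK, snd_cwnd_cnt climbs 1, 2, ... and on the
 * tenth ACK delta == 10 / 10 == 1, so the window grows by one segment
 * per window of data - the classic Reno 1/cwnd additive increase.  A
 * stretch ACK with acked == 25 at w == 10 instead yields delta == 2
 * immediately and carries the remaining 5 over in snd_cwnd_cnt.
 */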
/*
* TCP Reno congestion control
 * This is a special case, also used as the fallback.
*/
/* This is Jacobson's slow start and congestion avoidance.
* SIGCOMM '88, p. 328.
*/
__bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
if (!tcp_is_cwnd_limited(sk))
return;
/* In "safe" area, increase. */
if (tcp_in_slow_start(tp)) {
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
}
/* In dangerous area, increase slowly. */
tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
/* Slow start threshold is half the congestion window (min 2) */
__bpf_kfunc u32 tcp_reno_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
__bpf_kfunc u32 tcp_reno_undo_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
struct tcp_congestion_ops tcp_reno = {
.flags = TCP_CONG_NON_RESTRICTED,
.name = "reno",
.owner = THIS_MODULE,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
.undo_cwnd = tcp_reno_undo_cwnd,
};
| linux-master | net/ipv4/tcp_cong.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* ROUTE - implementation of the IP router.
*
* Authors: Ross Biro
* Fred N. van Kempen, <[email protected]>
* Alan Cox, <[email protected]>
* Linus Torvalds, <[email protected]>
* Alexey Kuznetsov, <[email protected]>
*
* Fixes:
* Alan Cox : Verify area fixes.
* Alan Cox : cli() protects routing changes
* Rui Oliveira : ICMP routing table updates
* ([email protected]) Routing table insertion and update
* Linus Torvalds : Rewrote bits to be sensible
* Alan Cox : Added BSD route gw semantics
* Alan Cox : Super /proc >4K
* Alan Cox : MTU in route table
* Alan Cox : MSS actually. Also added the window
* clamper.
* Sam Lantinga : Fixed route matching in rt_del()
* Alan Cox : Routing cache support.
* Alan Cox : Removed compatibility cruft.
* Alan Cox : RTF_REJECT support.
* Alan Cox : TCP irtt support.
* Jonathan Naylor : Added Metric support.
* Miquel van Smoorenburg : BSD API fixes.
* Miquel van Smoorenburg : Metrics.
* Alan Cox : Use __u32 properly
* Alan Cox : Aligned routing errors more closely with BSD
* our system is still very different.
* Alan Cox : Faster /proc handling
* Alexey Kuznetsov : Massive rework to support tree based routing,
* routing caches and better behaviour.
*
* Olaf Erb : irtt wasn't being copied right.
* Bjorn Ekwall : Kerneld route support.
* Alan Cox : Multicast fixed (I hope)
* Pavel Krauz : Limited broadcast fixed
* Mike McLagan : Routing by source
* Alexey Kuznetsov : End of old history. Split to fib.c and
* route.c and rewritten from scratch.
* Andi Kleen : Load-limit warning messages.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Vitaly E. Lavrov : Race condition in ip_route_input_slow.
* Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
* Vladimir V. Ivanov : IP rule info (flowid) is really useful.
* Marc Boucher : routing by fwmark
* Robert Olsson : Added rt_cache statistics
* Arnaldo C. Melo : Convert proc stuff to seq_file
* Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
* Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
* Ilia Sotnikov : Removed TOS from hash calculations
*/
#define pr_fmt(fmt) "IPv4: " fmt
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include "fib_lookup.h"
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
#define RT_GC_TIMEOUT (300*HZ)
#define DEFAULT_MIN_PMTU (512 + 20 + 20)
#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
#define DEFAULT_MIN_ADVMSS 256
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
/*
* Interface to generic destination cache.
*/
INDIRECT_CALLABLE_SCOPE
struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
WARN_ON(1);
return NULL;
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.mtu = ipv4_mtu,
.cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
.redirect = ip_do_redirect,
.local_out = __ip_local_out,
.neigh_lookup = ipv4_neigh_lookup,
.confirm_neigh = ipv4_confirm_neigh,
};
#define ECN_OR_COST(class) TC_PRIO_##class
const __u8 ip_tos2prio[16] = {
TC_PRIO_BESTEFFORT,
ECN_OR_COST(BESTEFFORT),
TC_PRIO_BESTEFFORT,
ECN_OR_COST(BESTEFFORT),
TC_PRIO_BULK,
ECN_OR_COST(BULK),
TC_PRIO_BULK,
ECN_OR_COST(BULK),
TC_PRIO_INTERACTIVE,
ECN_OR_COST(INTERACTIVE),
TC_PRIO_INTERACTIVE,
ECN_OR_COST(INTERACTIVE),
TC_PRIO_INTERACTIVE_BULK,
ECN_OR_COST(INTERACTIVE_BULK),
TC_PRIO_INTERACTIVE_BULK,
ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos)
return NULL;
return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq, "%-127s\n",
"Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
"Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
"HHUptod\tSpecDst");
return 0;
}
static const struct seq_operations rt_cache_seq_ops = {
.start = rt_cache_seq_start,
.next = rt_cache_seq_next,
.stop = rt_cache_seq_stop,
.show = rt_cache_seq_show,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
return &per_cpu(rt_cache_stat, cpu);
}
return NULL;
}
static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
return &per_cpu(rt_cache_stat, cpu);
}
(*pos)++;
return NULL;
}
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
struct rt_cache_stat *st = v;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
return 0;
}
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x "
"%08x %08x %08x %08x %08x %08x "
"%08x %08x %08x %08x\n",
dst_entries_get_slow(&ipv4_dst_ops),
0, /* st->in_hit */
st->in_slow_tot,
st->in_slow_mc,
st->in_no_route,
st->in_brd,
st->in_martian_dst,
st->in_martian_src,
0, /* st->out_hit */
st->out_slow_tot,
st->out_slow_mc,
0, /* st->gc_total */
0, /* st->gc_ignored */
0, /* st->gc_goal_miss */
0, /* st->gc_dst_overflow */
0, /* st->in_hlist_search */
0 /* st->out_hlist_search */
);
return 0;
}
static const struct seq_operations rt_cpu_seq_ops = {
.start = rt_cpu_seq_start,
.next = rt_cpu_seq_next,
.stop = rt_cpu_seq_stop,
.show = rt_cpu_seq_show,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
struct ip_rt_acct *dst, *src;
unsigned int i, j;
dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
if (!dst)
return -ENOMEM;
for_each_possible_cpu(i) {
src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
for (j = 0; j < 256; j++) {
dst[j].o_bytes += src[j].o_bytes;
dst[j].o_packets += src[j].o_packets;
dst[j].i_bytes += src[j].i_bytes;
dst[j].i_packets += src[j].i_packets;
}
}
seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
kfree(dst);
return 0;
}
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_create_seq("rt_cache", 0444, net->proc_net,
&rt_cache_seq_ops);
if (!pde)
goto err1;
pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
&rt_cpu_seq_ops);
if (!pde)
goto err2;
#ifdef CONFIG_IP_ROUTE_CLASSID
pde = proc_create_single("rt_acct", 0, net->proc_net,
rt_acct_proc_show);
if (!pde)
goto err3;
#endif
return 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
remove_proc_entry("rt_cache", net->proc_net);
err1:
return -ENOMEM;
}
static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
remove_proc_entry("rt_cache", net->proc_net_stat);
remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
remove_proc_entry("rt_acct", net->proc_net);
#endif
}
static struct pernet_operations ip_rt_proc_ops __net_initdata = {
.init = ip_rt_do_proc_init,
.exit = ip_rt_do_proc_exit,
};
static int __init ip_rt_proc_init(void)
{
return register_pernet_subsys(&ip_rt_proc_ops);
}
#else
static inline int ip_rt_proc_init(void)
{
return 0;
}
#endif /* CONFIG_PROC_FS */
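/* A cached route is stale once the per-netns IPv4 route generation id has
 * been bumped (see rt_cache_flush()) since the route was created.
 */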
static inline bool rt_is_expired(const struct rtable *rth)
{
return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}
void rt_cache_flush(struct net *net)
{
rt_genid_bump_ipv4(net);
}
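/* Find (and take a reference on) the neighbour used to transmit via this
 * route: the IPv4/IPv6 gateway when one is set, otherwise the destination
 * address taken from the skb or from 'daddr'.
 */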
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
const struct rtable *rt = container_of(dst, struct rtable, dst);
struct net_device *dev = dst->dev;
struct neighbour *n;
rcu_read_lock();
if (likely(rt->rt_gw_family == AF_INET)) {
n = ip_neigh_gw4(dev, rt->rt_gw4);
} else if (rt->rt_gw_family == AF_INET6) {
n = ip_neigh_gw6(dev, &rt->rt_gw6);
} else {
__be32 pkey;
pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
n = ip_neigh_gw4(dev, pkey);
}
if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
n = NULL;
rcu_read_unlock();
return n;
}
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
const struct rtable *rt = container_of(dst, struct rtable, dst);
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
if (rt->rt_gw_family == AF_INET) {
pkey = (const __be32 *)&rt->rt_gw4;
} else if (rt->rt_gw_family == AF_INET6) {
return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
} else if (!daddr ||
(rt->rt_flags &
(RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
return;
}
__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
/* Hash tables of size 2048..262144 depending on RAM size.
* Each bucket uses 8 bytes.
*/
static u32 ip_idents_mask __read_mostly;
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
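/* Reserve 'segs' consecutive IP IDs from the bucket selected by 'hash' and
 * return the first one. Buckets that have been idle additionally skip a
 * random delta so their next IDs cannot be predicted from previous ones.
 */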
static u32 ip_idents_reserve(u32 hash, int segs)
{
u32 bucket, old, now = (u32)jiffies;
atomic_t *p_id;
u32 *p_tstamp;
u32 delta = 0;
bucket = hash & ip_idents_mask;
p_tstamp = ip_tstamps + bucket;
p_id = ip_idents + bucket;
old = READ_ONCE(*p_tstamp);
if (old != now && cmpxchg(p_tstamp, old, now) == old)
delta = get_random_u32_below(now - old);
	/* If UBSAN reports an error here, please make sure your compiler
	 * supports -fno-strict-overflow before reporting it; that was a bug
	 * in UBSAN, and it has been fixed in GCC 8.
	 */
return atomic_add_return(segs + delta, p_id) - segs;
}
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
u32 hash, id;
/* Note the following code is not safe, but this is okay. */
if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
get_random_bytes(&net->ipv4.ip_id_key,
sizeof(net->ipv4.ip_id_key));
hash = siphash_3u32((__force u32)iph->daddr,
(__force u32)iph->saddr,
iph->protocol,
&net->ipv4.ip_id_key);
id = ip_idents_reserve(hash, segs);
iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
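/* Strip the flow TOS down to the bits routing uses (IPTOS_RT_MASK) and
 * translate the legacy RTO_ONLINK flag into link scope.
 */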
static void ip_rt_fix_tos(struct flowi4 *fl4)
{
__u8 tos = RT_FL_TOS(fl4);
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
if (tos & RTO_ONLINK)
fl4->flowi4_scope = RT_SCOPE_LINK;
}
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
const struct sock *sk, const struct iphdr *iph,
int oif, __u8 tos, u8 prot, u32 mark,
int flow_flags)
{
__u8 scope = RT_SCOPE_UNIVERSE;
if (sk) {
oif = sk->sk_bound_dev_if;
mark = READ_ONCE(sk->sk_mark);
tos = ip_sock_rt_tos(sk);
scope = ip_sock_rt_scope(sk);
prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
sk->sk_protocol;
}
flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
sock_net_uid(net, sk));
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
const struct sock *sk)
{
const struct net *net = dev_net(skb->dev);
const struct iphdr *iph = ip_hdr(skb);
int oif = skb->dev->ifindex;
u8 prot = iph->protocol;
u32 mark = skb->mark;
__u8 tos = iph->tos;
__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
ip_sock_rt_scope(sk),
inet_test_bit(HDRINCL, sk) ?
IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
rcu_read_unlock();
}
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
const struct sk_buff *skb)
{
if (skb)
build_skb_flow_key(fl4, skb, sk);
else
build_sk_flow_key(fl4, sk);
}
static DEFINE_SPINLOCK(fnhe_lock);
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
struct rtable *rt;
rt = rcu_dereference(fnhe->fnhe_rth_input);
if (rt) {
RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
dst_dev_put(&rt->dst);
dst_release(&rt->dst);
}
rt = rcu_dereference(fnhe->fnhe_rth_output);
if (rt) {
RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
dst_dev_put(&rt->dst);
dst_release(&rt->dst);
}
}
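/* Evict the exception with the oldest fnhe_stamp from this hash bucket.
 * Caller must hold fnhe_lock.
 */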
static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
{
struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
struct fib_nh_exception *fnhe, *oldest = NULL;
for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
fnhe = rcu_dereference_protected(*fnhe_p,
lockdep_is_held(&fnhe_lock));
if (!fnhe)
break;
if (!oldest ||
time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
oldest = fnhe;
oldest_p = fnhe_p;
}
}
fnhe_flush_routes(oldest);
*oldest_p = oldest->fnhe_next;
kfree_rcu(oldest, rcu);
}
static u32 fnhe_hashfun(__be32 daddr)
{
static siphash_aligned_key_t fnhe_hash_key;
u64 hval;
net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
return hash_64(hval, FNHE_HASH_SHIFT);
}
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
rt->rt_pmtu = fnhe->fnhe_pmtu;
rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
rt->dst.expires = fnhe->fnhe_expires;
if (fnhe->fnhe_gw) {
rt->rt_flags |= RTCF_REDIRECTED;
rt->rt_uses_gateway = 1;
rt->rt_gw_family = AF_INET;
rt->rt_gw4 = fnhe->fnhe_gw;
}
}
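/* Record a new per-nexthop exception for daddr (learnt redirect gateway
 * and/or PMTU), or refresh an existing one; cached routes are updated or
 * marked obsolete so the exception takes effect.
 */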
static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
__be32 gw, u32 pmtu, bool lock,
unsigned long expires)
{
struct fnhe_hash_bucket *hash;
struct fib_nh_exception *fnhe;
struct rtable *rt;
u32 genid, hval;
unsigned int i;
int depth;
genid = fnhe_genid(dev_net(nhc->nhc_dev));
hval = fnhe_hashfun(daddr);
spin_lock_bh(&fnhe_lock);
hash = rcu_dereference(nhc->nhc_exceptions);
if (!hash) {
hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
if (!hash)
goto out_unlock;
rcu_assign_pointer(nhc->nhc_exceptions, hash);
}
hash += hval;
depth = 0;
for (fnhe = rcu_dereference(hash->chain); fnhe;
fnhe = rcu_dereference(fnhe->fnhe_next)) {
if (fnhe->fnhe_daddr == daddr)
break;
depth++;
}
if (fnhe) {
if (fnhe->fnhe_genid != genid)
fnhe->fnhe_genid = genid;
if (gw)
fnhe->fnhe_gw = gw;
if (pmtu) {
fnhe->fnhe_pmtu = pmtu;
fnhe->fnhe_mtu_locked = lock;
}
fnhe->fnhe_expires = max(1UL, expires);
/* Update all cached dsts too */
rt = rcu_dereference(fnhe->fnhe_rth_input);
if (rt)
fill_route_from_fnhe(rt, fnhe);
rt = rcu_dereference(fnhe->fnhe_rth_output);
if (rt)
fill_route_from_fnhe(rt, fnhe);
} else {
/* Randomize max depth to avoid some side channels attacks. */
int max_depth = FNHE_RECLAIM_DEPTH +
get_random_u32_below(FNHE_RECLAIM_DEPTH);
while (depth > max_depth) {
fnhe_remove_oldest(hash);
depth--;
}
fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
if (!fnhe)
goto out_unlock;
fnhe->fnhe_next = hash->chain;
fnhe->fnhe_genid = genid;
fnhe->fnhe_daddr = daddr;
fnhe->fnhe_gw = gw;
fnhe->fnhe_pmtu = pmtu;
fnhe->fnhe_mtu_locked = lock;
fnhe->fnhe_expires = max(1UL, expires);
rcu_assign_pointer(hash->chain, fnhe);
/* Exception created; mark the cached routes for the nexthop
* stale, so anyone caching it rechecks if this exception
* applies to them.
*/
rt = rcu_dereference(nhc->nhc_rth_input);
if (rt)
rt->dst.obsolete = DST_OBSOLETE_KILL;
for_each_possible_cpu(i) {
struct rtable __rcu **prt;
prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
rt = rcu_dereference(*prt);
if (rt)
rt->dst.obsolete = DST_OBSOLETE_KILL;
}
}
fnhe->fnhe_stamp = jiffies;
out_unlock:
spin_unlock_bh(&fnhe_lock);
}
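/* Process an ICMP redirect received for this route: sanity-check the
 * advertised gateway, make sure it resolves as a neighbour, and record it
 * as a nexthop exception (optionally killing the current dst).
 */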
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
bool kill_route)
{
__be32 new_gw = icmp_hdr(skb)->un.gateway;
__be32 old_gw = ip_hdr(skb)->saddr;
struct net_device *dev = skb->dev;
struct in_device *in_dev;
struct fib_result res;
struct neighbour *n;
struct net *net;
switch (icmp_hdr(skb)->code & 7) {
case ICMP_REDIR_NET:
case ICMP_REDIR_NETTOS:
case ICMP_REDIR_HOST:
case ICMP_REDIR_HOSTTOS:
break;
default:
return;
}
if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
return;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
return;
net = dev_net(dev);
if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
ipv4_is_zeronet(new_gw))
goto reject_redirect;
if (!IN_DEV_SHARED_MEDIA(in_dev)) {
if (!inet_addr_onlink(in_dev, new_gw, old_gw))
goto reject_redirect;
if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
goto reject_redirect;
} else {
if (inet_addr_type(net, new_gw) != RTN_UNICAST)
goto reject_redirect;
}
n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
if (!n)
n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
if (!IS_ERR(n)) {
if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
neigh_event_send(n, NULL);
} else {
if (fib_lookup(net, fl4, &res, 0) == 0) {
struct fib_nh_common *nhc;
fib_select_path(net, &res, fl4, skb);
nhc = FIB_RES_NHC(res);
update_or_create_fnhe(nhc, fl4->daddr, new_gw,
0, false,
jiffies + ip_rt_gc_timeout);
}
if (kill_route)
rt->dst.obsolete = DST_OBSOLETE_KILL;
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
neigh_release(n);
}
return;
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev)) {
const struct iphdr *iph = (const struct iphdr *) skb->data;
__be32 daddr = iph->daddr;
__be32 saddr = iph->saddr;
net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
" Advised path = %pI4 -> %pI4\n",
&old_gw, dev->name, &new_gw,
&saddr, &daddr);
}
#endif
;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
struct rtable *rt;
struct flowi4 fl4;
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct net *net = dev_net(skb->dev);
int oif = skb->dev->ifindex;
u8 prot = iph->protocol;
u32 mark = skb->mark;
__u8 tos = iph->tos;
rt = (struct rtable *) dst;
__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
__ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *)dst;
struct dst_entry *ret = dst;
if (rt) {
if (dst->obsolete > 0) {
ip_rt_put(rt);
ret = NULL;
} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
rt->dst.expires) {
ip_rt_put(rt);
ret = NULL;
}
}
return ret;
}
/*
* Algorithm:
 * 1. The first ip_rt_redirect_number redirects are sent
 *    with exponential backoff, then we stop sending them at all,
 *    assuming that the host ignores our redirects.
 * 2. If we did not see packets requiring redirects
 *    during ip_rt_redirect_silence, we assume that the host
 *    forgot the redirected route and start sending redirects again.
*
* This algorithm is much cheaper and more intelligent than dumb load limiting
* in icmp.c.
*
* NOTE. Do not forget to inhibit load limiting for redirects (redundant)
* and "frag. need" (breaks PMTU discovery) in icmp.c.
*/
void ip_rt_send_redirect(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct in_device *in_dev;
struct inet_peer *peer;
struct net *net;
int log_martians;
int vif;
rcu_read_lock();
in_dev = __in_dev_get_rcu(rt->dst.dev);
if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
rcu_read_unlock();
return;
}
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
rcu_read_unlock();
net = dev_net(rt->dst.dev);
peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
if (!peer) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
rt_nexthop(rt, ip_hdr(skb)->daddr));
return;
}
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
*/
if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
peer->rate_tokens = 0;
peer->n_redirects = 0;
}
	/* Too many ignored redirects; do not send anything and set
	 * peer->rate_last to the time of the last seen redirected packet.
	 */
if (peer->n_redirects >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
goto out_put_peer;
}
/* Check for load limit; set rate_last to the latest sent
* redirect.
*/
if (peer->n_redirects == 0 ||
time_after(jiffies,
(peer->rate_last +
(ip_rt_redirect_load << peer->n_redirects)))) {
__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
peer->n_redirects == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
&ip_hdr(skb)->saddr, inet_iif(skb),
&ip_hdr(skb)->daddr, &gw);
#endif
}
out_put_peer:
inet_putpeer(peer);
}
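/* Input-path error handler (installed as dst.input for error routes):
 * translate dst.error into an ICMP destination unreachable message,
 * rate limited per source address via the inet_peer cache.
 */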
static int ip_error(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct net_device *dev = skb->dev;
struct in_device *in_dev;
struct inet_peer *peer;
unsigned long now;
struct net *net;
SKB_DR(reason);
bool send;
int code;
if (netif_is_l3_master(skb->dev)) {
dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
if (!dev)
goto out;
}
in_dev = __in_dev_get_rcu(dev);
/* IP on this device is disabled. */
if (!in_dev)
goto out;
net = dev_net(rt->dst.dev);
if (!IN_DEV_FORWARD(in_dev)) {
switch (rt->dst.error) {
case EHOSTUNREACH:
SKB_DR_SET(reason, IP_INADDRERRORS);
__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
break;
case ENETUNREACH:
SKB_DR_SET(reason, IP_INNOROUTES);
__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
break;
}
goto out;
}
switch (rt->dst.error) {
case EINVAL:
default:
goto out;
case EHOSTUNREACH:
code = ICMP_HOST_UNREACH;
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
SKB_DR_SET(reason, IP_INNOROUTES);
__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
code = ICMP_PKT_FILTERED;
break;
}
peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
l3mdev_master_ifindex(skb->dev), 1);
send = true;
if (peer) {
now = jiffies;
peer->rate_tokens += now - peer->rate_last;
if (peer->rate_tokens > ip_rt_error_burst)
peer->rate_tokens = ip_rt_error_burst;
peer->rate_last = now;
if (peer->rate_tokens >= ip_rt_error_cost)
peer->rate_tokens -= ip_rt_error_cost;
else
send = false;
inet_putpeer(peer);
}
if (send)
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
out: kfree_skb_reason(skb, reason);
return 0;
}
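/* Learn a reduced path MTU for this flow. Values below ip_rt_min_pmtu are
 * clamped and locked; the result is stored as a nexthop exception that
 * expires after ip_rt_mtu_expires.
 */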
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
struct net *net = dev_net(dst->dev);
struct fib_result res;
bool lock = false;
u32 old_mtu;
if (ip_mtu_locked(dst))
return;
old_mtu = ipv4_mtu(dst);
if (old_mtu < mtu)
return;
if (mtu < net->ipv4.ip_rt_min_pmtu) {
lock = true;
mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
}
if (rt->rt_pmtu == mtu && !lock &&
time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
return;
rcu_read_lock();
if (fib_lookup(net, fl4, &res, 0) == 0) {
struct fib_nh_common *nhc;
fib_select_path(net, &res, fl4, NULL);
nhc = FIB_RES_NHC(res);
update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
jiffies + net->ipv4.ip_rt_mtu_expires);
}
rcu_read_unlock();
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
struct rtable *rt = (struct rtable *) dst;
struct flowi4 fl4;
ip_rt_build_flow_key(&fl4, sk, skb);
/* Don't make lookup fail for bridged encapsulations */
if (skb && netif_is_any_bridge_port(skb->dev))
fl4.flowi4_oif = 0;
__ip_rt_update_pmtu(rt, &fl4, mtu);
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
int oif, u8 protocol)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
u32 mark = IP4_REPLY_MARK(net, skb->mark);
__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
ip_rt_put(rt);
}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
if (!fl4.flowi4_mark)
fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
rt = __ip_route_output_key(sock_net(sk), &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
ip_rt_put(rt);
}
}
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
struct dst_entry *odst = NULL;
bool new = false;
struct net *net = sock_net(sk);
bh_lock_sock(sk);
if (!ip_sk_accept_pmtu(sk))
goto out;
odst = sk_dst_get(sk);
if (sock_owned_by_user(sk) || !odst) {
__ipv4_sk_update_pmtu(skb, sk, mtu);
goto out;
}
__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
rt = (struct rtable *)odst;
if (odst->obsolete && !odst->ops->check(odst, 0)) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
new = true;
}
__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
if (!dst_check(&rt->dst, 0)) {
if (new)
dst_release(&rt->dst);
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
new = true;
}
if (new)
sk_dst_set(sk, &rt->dst);
out:
bh_unlock_sock(sk);
dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
int oif, u8 protocol)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_do_redirect(rt, skb, &fl4, false);
ip_rt_put(rt);
}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
struct net *net = sock_net(sk);
__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_do_redirect(rt, skb, &fl4, false);
ip_rt_put(rt);
}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
u32 cookie)
{
struct rtable *rt = (struct rtable *) dst;
/* All IPV4 dsts are created with ->obsolete set to the value
* DST_OBSOLETE_FORCE_CHK which forces validation calls down
* into this function always.
*
* When a PMTU/redirect information update invalidates a route,
* this is indicated by setting obsolete to DST_OBSOLETE_KILL or
* DST_OBSOLETE_DEAD.
*/
if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
return NULL;
return dst;
}
EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
struct net_device *dev;
struct ip_options opt;
int res;
/* Recompile ip options since IPCB may not be valid anymore.
* Also check we have a reasonable ipv4 header.
*/
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
return;
memset(&opt, 0, sizeof(opt));
if (ip_hdr(skb)->ihl > 5) {
if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
return;
opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
rcu_read_lock();
dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
rcu_read_unlock();
if (res)
return;
}
__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
ipv4_send_dest_unreach(skb);
rt = skb_rtable(skb);
if (rt)
dst_set_expires(&rt->dst, 0);
}
static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
pr_debug("%s: %pI4 -> %pI4, %s\n",
__func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
skb->dev ? skb->dev->name : "?");
kfree_skb(skb);
WARN_ON(1);
return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by the IP RR, TS and SRR options,
 * so it stays out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned
 * when it lives inside IP options!
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
__be32 src;
if (rt_is_output_route(rt))
src = ip_hdr(skb)->saddr;
else {
struct fib_result res;
struct iphdr *iph = ip_hdr(skb);
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowi4_tos = RT_TOS(iph->tos),
.flowi4_oif = rt->dst.dev->ifindex,
.flowi4_iif = skb->dev->ifindex,
.flowi4_mark = skb->mark,
};
rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
else
src = inet_select_addr(rt->dst.dev,
rt_nexthop(rt, iph->daddr),
RT_SCOPE_UNIVERSE);
rcu_read_unlock();
}
memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
if (!(rt->dst.tclassid & 0xFFFF))
rt->dst.tclassid |= tag & 0xFFFF;
if (!(rt->dst.tclassid & 0xFFFF0000))
rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
struct net *net = dev_net(dst->dev);
unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
net->ipv4.ip_rt_min_advmss);
return min(advmss, IPV4_MAX_PMTU - header_size);
}
INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
{
return ip_dst_mtu_maybe_forward(dst, false);
}
EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
struct fnhe_hash_bucket *hash;
struct fib_nh_exception *fnhe, __rcu **fnhe_p;
u32 hval = fnhe_hashfun(daddr);
spin_lock_bh(&fnhe_lock);
hash = rcu_dereference_protected(nhc->nhc_exceptions,
lockdep_is_held(&fnhe_lock));
hash += hval;
fnhe_p = &hash->chain;
fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
while (fnhe) {
if (fnhe->fnhe_daddr == daddr) {
rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
/* set fnhe_daddr to 0 to ensure it won't bind with
* new dsts in rt_bind_exception().
*/
fnhe->fnhe_daddr = 0;
fnhe_flush_routes(fnhe);
kfree_rcu(fnhe, rcu);
break;
}
fnhe_p = &fnhe->fnhe_next;
fnhe = rcu_dereference_protected(fnhe->fnhe_next,
lockdep_is_held(&fnhe_lock));
}
spin_unlock_bh(&fnhe_lock);
}
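/* RCU lookup of the nexthop exception matching daddr; entries whose
 * fnhe_expires has passed are removed on the fly.
 */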
static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
__be32 daddr)
{
struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
struct fib_nh_exception *fnhe;
u32 hval;
if (!hash)
return NULL;
hval = fnhe_hashfun(daddr);
for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
fnhe = rcu_dereference(fnhe->fnhe_next)) {
if (fnhe->fnhe_daddr == daddr) {
if (fnhe->fnhe_expires &&
time_after(jiffies, fnhe->fnhe_expires)) {
ip_del_fnhe(nhc, daddr);
break;
}
return fnhe;
}
}
return NULL;
}
/* MTU selection:
* 1. mtu on route is locked - use it
* 2. mtu from nexthop exception
* 3. mtu from egress device
*/
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
struct fib_nh_common *nhc = res->nhc;
struct net_device *dev = nhc->nhc_dev;
struct fib_info *fi = res->fi;
u32 mtu = 0;
if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
mtu = fi->fib_mtu;
if (likely(!mtu)) {
struct fib_nh_exception *fnhe;
fnhe = find_exception(nhc, daddr);
if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
mtu = fnhe->fnhe_pmtu;
}
if (likely(!mtu))
mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
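/* Apply the exception's gateway/PMTU state to 'rt' and, if do_cache, store
 * 'rt' in the exception's input or output slot, replacing and releasing any
 * previously cached route.
 */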
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
__be32 daddr, const bool do_cache)
{
bool ret = false;
spin_lock_bh(&fnhe_lock);
if (daddr == fnhe->fnhe_daddr) {
struct rtable __rcu **porig;
struct rtable *orig;
int genid = fnhe_genid(dev_net(rt->dst.dev));
if (rt_is_input_route(rt))
porig = &fnhe->fnhe_rth_input;
else
porig = &fnhe->fnhe_rth_output;
orig = rcu_dereference(*porig);
if (fnhe->fnhe_genid != genid) {
fnhe->fnhe_genid = genid;
fnhe->fnhe_gw = 0;
fnhe->fnhe_pmtu = 0;
fnhe->fnhe_expires = 0;
fnhe->fnhe_mtu_locked = false;
fnhe_flush_routes(fnhe);
orig = NULL;
}
fill_route_from_fnhe(rt, fnhe);
if (!rt->rt_gw4) {
rt->rt_gw4 = daddr;
rt->rt_gw_family = AF_INET;
}
if (do_cache) {
dst_hold(&rt->dst);
rcu_assign_pointer(*porig, rt);
if (orig) {
dst_dev_put(&orig->dst);
dst_release(&orig->dst);
}
ret = true;
}
fnhe->fnhe_stamp = jiffies;
}
spin_unlock_bh(&fnhe_lock);
return ret;
}
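/* Try to cache 'rt' in the nexthop: the shared input slot or this CPU's
 * output slot. The replaced route, if any, is moved to the uncached list
 * before being released.
 */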
static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
struct rtable *orig, *prev, **p;
bool ret = true;
if (rt_is_input_route(rt)) {
p = (struct rtable **)&nhc->nhc_rth_input;
} else {
p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
}
orig = *p;
/* hold dst before doing cmpxchg() to avoid race condition
* on this dst
*/
dst_hold(&rt->dst);
prev = cmpxchg(p, orig, rt);
if (prev == orig) {
if (orig) {
rt_add_uncached_list(orig);
dst_release(&orig->dst);
}
} else {
dst_release(&rt->dst);
ret = false;
}
return ret;
}
struct uncached_list {
spinlock_t lock;
struct list_head head;
struct list_head quarantine;
};
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
void rt_add_uncached_list(struct rtable *rt)
{
struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
rt->dst.rt_uncached_list = ul;
spin_lock_bh(&ul->lock);
list_add_tail(&rt->dst.rt_uncached, &ul->head);
spin_unlock_bh(&ul->lock);
}
void rt_del_uncached_list(struct rtable *rt)
{
if (!list_empty(&rt->dst.rt_uncached)) {
struct uncached_list *ul = rt->dst.rt_uncached_list;
spin_lock_bh(&ul->lock);
list_del_init(&rt->dst.rt_uncached);
spin_unlock_bh(&ul->lock);
}
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *)dst;
ip_dst_metrics_put(dst);
rt_del_uncached_list(rt);
}
void rt_flush_dev(struct net_device *dev)
{
struct rtable *rt, *safe;
int cpu;
for_each_possible_cpu(cpu) {
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
if (list_empty(&ul->head))
continue;
spin_lock_bh(&ul->lock);
list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
if (rt->dst.dev != dev)
continue;
rt->dst.dev = blackhole_netdev;
netdev_ref_replace(dev, blackhole_netdev,
&rt->dst.dev_tracker, GFP_ATOMIC);
list_move(&rt->dst.rt_uncached, &ul->quarantine);
}
spin_unlock_bh(&ul->lock);
}
}
static bool rt_cache_valid(const struct rtable *rt)
{
return rt &&
rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
!rt_is_expired(rt);
}
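/* Copy gateway, metrics, classid and lwtunnel state from the FIB result
 * into the new route and try to cache it (in an exception or in the
 * nexthop); routes that could not be cached go on the uncached list.
 */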
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
const struct fib_result *res,
struct fib_nh_exception *fnhe,
struct fib_info *fi, u16 type, u32 itag,
const bool do_cache)
{
bool cached = false;
if (fi) {
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
rt->rt_uses_gateway = 1;
rt->rt_gw_family = nhc->nhc_gw_family;
/* only INET and INET6 are supported */
if (likely(nhc->nhc_gw_family == AF_INET))
rt->rt_gw4 = nhc->nhc_gw.ipv4;
else
rt->rt_gw6 = nhc->nhc_gw.ipv6;
}
ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
#ifdef CONFIG_IP_ROUTE_CLASSID
if (nhc->nhc_family == AF_INET) {
struct fib_nh *nh;
nh = container_of(nhc, struct fib_nh, nh_common);
rt->dst.tclassid = nh->nh_tclassid;
}
#endif
rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
if (unlikely(fnhe))
cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
else if (do_cache)
cached = rt_cache_route(nhc, rt);
if (unlikely(!cached)) {
/* Routes we intend to cache in nexthop exception or
* FIB nexthop have the DST_NOCACHE bit clear.
* However, if we are unsuccessful at storing this
* route into the cache we really need to set it.
*/
if (!rt->rt_gw4) {
rt->rt_gw_family = AF_INET;
rt->rt_gw4 = daddr;
}
rt_add_uncached_list(rt);
}
} else
rt_add_uncached_list(rt);
#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
set_class_tag(rt, res->tclassid);
#endif
set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
unsigned int flags, u16 type,
bool noxfrm)
{
struct rtable *rt;
rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
(noxfrm ? DST_NOXFRM : 0));
if (rt) {
rt->rt_genid = rt_genid_ipv4(dev_net(dev));
rt->rt_flags = flags;
rt->rt_type = type;
rt->rt_is_input = 0;
rt->rt_iif = 0;
rt->rt_pmtu = 0;
rt->rt_mtu_locked = 0;
rt->rt_uses_gateway = 0;
rt->rt_gw_family = 0;
rt->rt_gw4 = 0;
rt->dst.output = ip_output;
if (flags & RTCF_LOCAL)
rt->dst.input = ip_local_deliver;
}
return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
struct rtable *new_rt;
new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
rt->dst.flags);
if (new_rt) {
new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
new_rt->rt_flags = rt->rt_flags;
new_rt->rt_type = rt->rt_type;
new_rt->rt_is_input = rt->rt_is_input;
new_rt->rt_iif = rt->rt_iif;
new_rt->rt_pmtu = rt->rt_pmtu;
new_rt->rt_mtu_locked = rt->rt_mtu_locked;
new_rt->rt_gw_family = rt->rt_gw_family;
if (rt->rt_gw_family == AF_INET)
new_rt->rt_gw4 = rt->rt_gw4;
else if (rt->rt_gw_family == AF_INET6)
new_rt->rt_gw6 = rt->rt_gw6;
new_rt->dst.input = rt->dst.input;
new_rt->dst.output = rt->dst.output;
new_rt->dst.error = rt->dst.error;
new_rt->dst.lastuse = jiffies;
new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
}
return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev,
struct in_device *in_dev, u32 *itag)
{
int err;
/* Primary sanity checks. */
if (!in_dev)
return -EINVAL;
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
skb->protocol != htons(ETH_P_IP))
return -EINVAL;
if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
return -EINVAL;
if (ipv4_is_zeronet(saddr)) {
if (!ipv4_is_local_multicast(daddr) &&
ip_hdr(skb)->protocol != IPPROTO_IGMP)
return -EINVAL;
} else {
err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
in_dev, itag);
if (err < 0)
return err;
}
return 0;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, int our)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
unsigned int flags = RTCF_MULTICAST;
struct rtable *rth;
u32 itag = 0;
int err;
err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
if (err)
return err;
if (our)
flags |= RTCF_LOCAL;
if (IN_DEV_ORCONF(in_dev, NOPOLICY))
IPCB(skb)->flags |= IPSKB_NOPOLICY;
rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
false);
if (!rth)
return -ENOBUFS;
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;
#ifdef CONFIG_IP_MROUTE
if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
rth->dst.input = ip_mr_input;
#endif
RT_CACHE_STAT_INC(in_slow_mc);
skb_dst_drop(skb);
skb_dst_set(skb, &rth->dst);
return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
struct in_device *in_dev,
struct sk_buff *skb,
__be32 daddr,
__be32 saddr)
{
RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 * RFC 1812 recommendation: if the source is martian,
		 * the only hint we can log is the MAC header.
		 */
pr_warn("martian source %pI4 from %pI4, on dev %s\n",
&daddr, &saddr, dev->name);
if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
print_hex_dump(KERN_WARNING, "ll header: ",
DUMP_PREFIX_OFFSET, 16, 1,
skb_mac_header(skb),
dev->hard_header_len, false);
}
}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
const struct fib_result *res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
struct net_device *dev = nhc->nhc_dev;
struct fib_nh_exception *fnhe;
struct rtable *rth;
int err;
struct in_device *out_dev;
bool do_cache;
u32 itag = 0;
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(dev);
if (!out_dev) {
net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
}
err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
in_dev->dev, in_dev, &itag);
if (err < 0) {
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
goto cleanup;
}
do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
skb->protocol == htons(ETH_P_IP)) {
__be32 gw;
gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
if (IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, gw))
IPCB(skb)->flags |= IPSKB_DOREDIRECT;
}
if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy ARP. DNAT routes are always valid.
		 *
		 * The proxy ARP feature has been extended to allow ARP
		 * replies back out the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
if (out_dev == in_dev &&
IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
err = -EINVAL;
goto cleanup;
}
}
if (IN_DEV_ORCONF(in_dev, NOPOLICY))
IPCB(skb)->flags |= IPSKB_NOPOLICY;
fnhe = find_exception(nhc, daddr);
if (do_cache) {
if (fnhe)
rth = rcu_dereference(fnhe->fnhe_rth_input);
else
rth = rcu_dereference(nhc->nhc_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
goto out;
}
}
rth = rt_dst_alloc(out_dev->dev, 0, res->type,
IN_DEV_ORCONF(out_dev, NOXFRM));
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}
rth->rt_is_input = 1;
RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
do_cache);
lwtunnel_set_redirect(&rth->dst);
skb_dst_set(skb, &rth->dst);
out:
err = 0;
cleanup:
return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
* calculated from the inner IP addresses.
*/
static void ip_multipath_l3_keys(const struct sk_buff *skb,
struct flow_keys *hash_keys)
{
const struct iphdr *outer_iph = ip_hdr(skb);
const struct iphdr *key_iph = outer_iph;
const struct iphdr *inner_iph;
const struct icmphdr *icmph;
struct iphdr _inner_iph;
struct icmphdr _icmph;
if (likely(outer_iph->protocol != IPPROTO_ICMP))
goto out;
if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
goto out;
icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
&_icmph);
if (!icmph)
goto out;
if (!icmp_is_err(icmph->type))
goto out;
inner_iph = skb_header_pointer(skb,
outer_iph->ihl * 4 + sizeof(_icmph),
sizeof(_inner_iph), &_inner_iph);
if (!inner_iph)
goto out;
key_iph = inner_iph;
out:
hash_keys->addrs.v4addrs.src = key_iph->saddr;
hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
static u32 fib_multipath_custom_hash_outer(const struct net *net,
const struct sk_buff *skb,
bool *p_has_inner)
{
u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
struct flow_keys keys, hash_keys;
if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
return 0;
memset(&hash_keys, 0, sizeof(hash_keys));
skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
hash_keys.basic.ip_proto = keys.basic.ip_proto;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
hash_keys.ports.src = keys.ports.src;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
hash_keys.ports.dst = keys.ports.dst;
*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
return flow_hash_from_keys(&hash_keys);
}
static u32 fib_multipath_custom_hash_inner(const struct net *net,
const struct sk_buff *skb,
bool has_inner)
{
u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
struct flow_keys keys, hash_keys;
/* We assume the packet carries an encapsulation, but if none was
* encountered during dissection of the outer flow, then there is no
* point in calling the flow dissector again.
*/
if (!has_inner)
return 0;
if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
return 0;
memset(&hash_keys, 0, sizeof(hash_keys));
skb_flow_dissect_flow_keys(skb, &keys, 0);
if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
return 0;
if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
hash_keys.tags.flow_label = keys.tags.flow_label;
}
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
hash_keys.basic.ip_proto = keys.basic.ip_proto;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
hash_keys.ports.src = keys.ports.src;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
hash_keys.ports.dst = keys.ports.dst;
return flow_hash_from_keys(&hash_keys);
}
static u32 fib_multipath_custom_hash_skb(const struct net *net,
const struct sk_buff *skb)
{
u32 mhash, mhash_inner;
bool has_inner = true;
mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);
return jhash_2words(mhash, mhash_inner, 0);
}
static u32 fib_multipath_custom_hash_fl4(const struct net *net,
const struct flowi4 *fl4)
{
u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
struct flow_keys hash_keys;
if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
return 0;
memset(&hash_keys, 0, sizeof(hash_keys));
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
hash_keys.addrs.v4addrs.src = fl4->saddr;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
hash_keys.addrs.v4addrs.dst = fl4->daddr;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
hash_keys.basic.ip_proto = fl4->flowi4_proto;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
hash_keys.ports.src = fl4->fl4_sport;
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
hash_keys.ports.dst = fl4->fl4_dport;
return flow_hash_from_keys(&hash_keys);
}
/* if skb is set it will be used and fl4 can be NULL */
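/* Hash policy (net.ipv4.fib_multipath_hash_policy):
 *   0 - L3 addresses (for ICMP errors, the inner addresses)
 *   1 - L4 five-tuple
 *   2 - inner L3 when an encapsulation is present, otherwise outer L3
 *   3 - custom set of fields from fib_multipath_hash_fields
 */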
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys)
{
u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
struct flow_keys hash_keys;
u32 mhash = 0;
switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
case 0:
memset(&hash_keys, 0, sizeof(hash_keys));
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
if (skb) {
ip_multipath_l3_keys(skb, &hash_keys);
} else {
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
}
mhash = flow_hash_from_keys(&hash_keys);
break;
case 1:
/* skb is currently provided only when forwarding */
if (skb) {
unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
struct flow_keys keys;
/* short-circuit if we already have L4 hash present */
if (skb->l4_hash)
return skb_get_hash_raw(skb) >> 1;
memset(&hash_keys, 0, sizeof(hash_keys));
if (!flkeys) {
skb_flow_dissect_flow_keys(skb, &keys, flag);
flkeys = &keys;
}
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
hash_keys.ports.src = flkeys->ports.src;
hash_keys.ports.dst = flkeys->ports.dst;
hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
} else {
memset(&hash_keys, 0, sizeof(hash_keys));
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
hash_keys.ports.src = fl4->fl4_sport;
hash_keys.ports.dst = fl4->fl4_dport;
hash_keys.basic.ip_proto = fl4->flowi4_proto;
}
mhash = flow_hash_from_keys(&hash_keys);
break;
case 2:
memset(&hash_keys, 0, sizeof(hash_keys));
/* skb is currently provided only when forwarding */
if (skb) {
struct flow_keys keys;
skb_flow_dissect_flow_keys(skb, &keys, 0);
/* Inner can be v4 or v6 */
if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
hash_keys.tags.flow_label = keys.tags.flow_label;
hash_keys.basic.ip_proto = keys.basic.ip_proto;
} else {
/* Same as case 0 */
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
ip_multipath_l3_keys(skb, &hash_keys);
}
} else {
/* Same as case 0 */
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
}
mhash = flow_hash_from_keys(&hash_keys);
break;
case 3:
if (skb)
mhash = fib_multipath_custom_hash_skb(net, skb);
else
mhash = fib_multipath_custom_hash_fl4(net, fl4);
break;
}
if (multipath_hash)
mhash = jhash_2words(mhash, multipath_hash, 0);
return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int ip_mkroute_input(struct sk_buff *skb,
struct fib_result *res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos,
struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res->fi && fib_info_num_path(res->fi) > 1) {
int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
fib_select_multipath(res, h);
IPCB(skb)->flags |= IPSKB_MULTIPATH;
}
#endif
/* create a routing cache entry */
return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/* Implements all the saddr-related checks from ip_route_input_slow(),
* assuming daddr is valid and the destination is not a local broadcast one.
* Uses the provided hint instead of performing a route lookup.
*/
int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev,
const struct sk_buff *hint)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct rtable *rt = skb_rtable(hint);
struct net *net = dev_net(dev);
int err = -EINVAL;
u32 tag = 0;
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
if (ipv4_is_zeronet(saddr))
goto martian_source;
if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
goto martian_source;
if (rt->rt_type != RTN_LOCAL)
goto skip_validate_source;
tos &= IPTOS_RT_MASK;
err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
if (err < 0)
goto martian_source;
skip_validate_source:
skb_dst_copy(skb, hint);
return 0;
martian_source:
ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
return err;
}
/* get device for dst_alloc with local routes */
static struct net_device *ip_rt_get_dev(struct net *net,
const struct fib_result *res)
{
struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
struct net_device *dev = NULL;
if (nhc)
dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
return dev ? : net->loopback_dev;
}
/*
 * NOTE. We drop all packets that have a local source
 * address, because every properly looped back packet
 * must already have the correct destination attached by the output routine.
 * Changes in the enforced policies must also be applied to
 * ip_route_use_hint().
 *
 * This approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 * Called with rcu_read_lock().
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev,
struct fib_result *res)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct flow_keys *flkeys = NULL, _flkeys;
struct net *net = dev_net(dev);
struct ip_tunnel_info *tun_info;
int err = -EINVAL;
unsigned int flags = 0;
u32 itag = 0;
struct rtable *rth;
struct flowi4 fl4;
bool do_cache = true;
/* IP on this device is disabled. */
if (!in_dev)
goto out;
/* Check for the most weird martians, which can be not detected
* by fib_lookup.
*/
tun_info = skb_tunnel_info(skb);
if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
else
fl4.flowi4_tun_key.tun_id = 0;
skb_dst_drop(skb);
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
res->fi = NULL;
res->table = NULL;
if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
goto brd_input;
	/* Accept zero addresses only for the limited broadcast;
	 * it is not clear whether this should be fixed. Waiting for complaints :-)
	 */
if (ipv4_is_zeronet(saddr))
goto martian_source;
if (ipv4_is_zeronet(daddr))
goto martian_destination;
	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
	 * more than once, calling it only when daddr and/or saddr is a
	 * loopback address.
	 */
if (ipv4_is_loopback(daddr)) {
if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
goto martian_destination;
} else if (ipv4_is_loopback(saddr)) {
if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
goto martian_source;
}
/*
* Now we are ready to route packet.
*/
fl4.flowi4_l3mdev = 0;
fl4.flowi4_oif = 0;
fl4.flowi4_iif = dev->ifindex;
fl4.flowi4_mark = skb->mark;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = 0;
fl4.daddr = daddr;
fl4.saddr = saddr;
fl4.flowi4_uid = sock_net_uid(net, NULL);
fl4.flowi4_multipath_hash = 0;
if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
flkeys = &_flkeys;
} else {
fl4.flowi4_proto = 0;
fl4.fl4_sport = 0;
fl4.fl4_dport = 0;
}
err = fib_lookup(net, &fl4, res, 0);
if (err != 0) {
if (!IN_DEV_FORWARD(in_dev))
err = -EHOSTUNREACH;
goto no_route;
}
if (res->type == RTN_BROADCAST) {
if (IN_DEV_BFORWARD(in_dev))
goto make_route;
		/* do not cache if bc_forwarding is enabled */
if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
do_cache = false;
goto brd_input;
}
if (res->type == RTN_LOCAL) {
err = fib_validate_source(skb, saddr, daddr, tos,
0, dev, in_dev, &itag);
if (err < 0)
goto martian_source;
goto local_input;
}
if (!IN_DEV_FORWARD(in_dev)) {
err = -EHOSTUNREACH;
goto no_route;
}
if (res->type != RTN_UNICAST)
goto martian_destination;
make_route:
err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out: return err;
brd_input:
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
if (!ipv4_is_zeronet(saddr)) {
err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
in_dev, &itag);
if (err < 0)
goto martian_source;
}
flags |= RTCF_BROADCAST;
res->type = RTN_BROADCAST;
RT_CACHE_STAT_INC(in_brd);
local_input:
if (IN_DEV_ORCONF(in_dev, NOPOLICY))
IPCB(skb)->flags |= IPSKB_NOPOLICY;
do_cache &= res->fi && !itag;
if (do_cache) {
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
rth = rcu_dereference(nhc->nhc_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
err = 0;
goto out;
}
}
rth = rt_dst_alloc(ip_rt_get_dev(net, res),
flags | RTCF_LOCAL, res->type, false);
if (!rth)
goto e_nobufs;
	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->rt_is_input = 1;
RT_CACHE_STAT_INC(in_slow_tot);
if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
rth->rt_flags &= ~RTCF_LOCAL;
}
if (do_cache) {
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
WARN_ON(rth->dst.input == lwtunnel_input);
rth->dst.lwtstate->orig_input = rth->dst.input;
rth->dst.input = lwtunnel_input;
}
if (unlikely(!rt_cache_route(nhc, rth)))
rt_add_uncached_list(rth);
}
skb_dst_set(skb, &rth->dst);
err = 0;
goto out;
no_route:
RT_CACHE_STAT_INC(in_no_route);
res->type = RTN_UNREACHABLE;
res->fi = NULL;
res->table = NULL;
goto local_input;
/*
* Do not cache martian addresses: they should be logged (RFC1812)
*/
martian_destination:
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev))
net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
&daddr, &saddr, dev->name);
#endif
e_inval:
err = -EINVAL;
goto out;
e_nobufs:
err = -ENOBUFS;
goto out;
martian_source:
ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
goto out;
}
/* called with rcu_read_lock held */
static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic was moved from the route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result, a host on a multicast
	 * network used to acquire a lot of useless route cache entries, e.g.
	 * for SDR messages from all over the world. Now we try to get rid of
	 * them. Really, provided the software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * compared with route cache reject entries.
	 * Note that multicast routers are not affected, because a
	 * route cache entry is created eventually.
	 */
if (ipv4_is_multicast(daddr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
int our = 0;
int err = -EINVAL;
if (!in_dev)
return err;
our = ip_check_mc_rcu(in_dev, daddr, saddr,
ip_hdr(skb)->protocol);
/* check l3 master if no match yet */
if (!our && netif_is_l3_slave(dev)) {
struct in_device *l3_in_dev;
l3_in_dev = __in_dev_get_rcu(skb->dev);
if (l3_in_dev)
our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
ip_hdr(skb)->protocol);
}
if (our
#ifdef CONFIG_IP_MROUTE
||
(!ipv4_is_local_multicast(daddr) &&
IN_DEV_MFORWARD(in_dev))
#endif
) {
err = ip_route_input_mc(skb, daddr, saddr,
tos, dev, our);
}
return err;
}
return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
struct fib_result res;
int err;
tos &= IPTOS_RT_MASK;
rcu_read_lock();
err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
const struct flowi4 *fl4, int orig_oif,
struct net_device *dev_out,
unsigned int flags)
{
struct fib_info *fi = res->fi;
struct fib_nh_exception *fnhe;
struct in_device *in_dev;
u16 type = res->type;
struct rtable *rth;
bool do_cache;
in_dev = __in_dev_get_rcu(dev_out);
if (!in_dev)
return ERR_PTR(-EINVAL);
if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
if (ipv4_is_loopback(fl4->saddr) &&
!(dev_out->flags & IFF_LOOPBACK) &&
!netif_is_l3_master(dev_out))
return ERR_PTR(-EINVAL);
if (ipv4_is_lbcast(fl4->daddr))
type = RTN_BROADCAST;
else if (ipv4_is_multicast(fl4->daddr))
type = RTN_MULTICAST;
else if (ipv4_is_zeronet(fl4->daddr))
return ERR_PTR(-EINVAL);
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
do_cache = true;
if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
fi = NULL;
} else if (type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST | RTCF_LOCAL;
if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
fl4->flowi4_proto))
flags &= ~RTCF_LOCAL;
else
do_cache = false;
		/* If a multicast route does not exist, use the
		 * default one, but do not use a gateway in this case.
		 * Yes, it is a hack.
		 */
if (fi && res->prefixlen < 4)
fi = NULL;
} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
(orig_oif != dev_out->ifindex)) {
/* For local routes that require a particular output interface
* we do not want to cache the result. Caching the result
* causes incorrect behaviour when there are multiple source
* addresses on the interface, the end result being that if the
* intended recipient is waiting on that interface for the
* packet he won't receive it because it will be delivered on
* the loopback interface and the IP_PKTINFO ipi_ifindex will
* be set to the loopback interface as well.
*/
do_cache = false;
}
fnhe = NULL;
do_cache &= fi != NULL;
if (fi) {
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
struct rtable __rcu **prth;
fnhe = find_exception(nhc, fl4->daddr);
if (!do_cache)
goto add;
if (fnhe) {
prth = &fnhe->fnhe_rth_output;
} else {
if (unlikely(fl4->flowi4_flags &
FLOWI_FLAG_KNOWN_NH &&
!(nhc->nhc_gw_family &&
nhc->nhc_scope == RT_SCOPE_LINK))) {
do_cache = false;
goto add;
}
prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
}
rth = rcu_dereference(*prth);
if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
return rth;
}
add:
rth = rt_dst_alloc(dev_out, flags, type,
IN_DEV_ORCONF(in_dev, NOXFRM));
if (!rth)
return ERR_PTR(-ENOBUFS);
rth->rt_iif = orig_oif;
RT_CACHE_STAT_INC(out_slow_tot);
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
rth->dst.output = ip_mc_output;
RT_CACHE_STAT_INC(out_slow_mc);
}
#ifdef CONFIG_IP_MROUTE
if (type == RTN_MULTICAST) {
if (IN_DEV_MFORWARD(in_dev) &&
!ipv4_is_local_multicast(fl4->daddr)) {
rth->dst.input = ip_mr_input;
rth->dst.output = ip_mc_output;
}
}
#endif
}
rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
lwtunnel_set_redirect(&rth->dst);
return rth;
}
/*
* Major route resolver routine.
*/
struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
const struct sk_buff *skb)
{
struct fib_result res = {
.type = RTN_UNSPEC,
.fi = NULL,
.table = NULL,
.tclassid = 0,
};
struct rtable *rth;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
ip_rt_fix_tos(fl4);
rcu_read_lock();
rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
rcu_read_unlock();
return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
struct fib_result *res,
const struct sk_buff *skb)
{
struct net_device *dev_out = NULL;
int orig_oif = fl4->flowi4_oif;
unsigned int flags = 0;
struct rtable *rth;
int err;
if (fl4->saddr) {
if (ipv4_is_multicast(fl4->saddr) ||
ipv4_is_lbcast(fl4->saddr) ||
ipv4_is_zeronet(fl4->saddr)) {
rth = ERR_PTR(-EINVAL);
goto out;
}
rth = ERR_PTR(-ENETUNREACH);
		/* I removed the check for oif == dev_out->oif here.
		 * It was wrong for two reasons:
		 * 1. ip_dev_find(net, saddr) can return the wrong iface, if
		 *    saddr is assigned to multiple interfaces.
		 * 2. Moreover, we are allowed to send packets with a saddr
		 *    of another iface. --ANK
		 */
if (fl4->flowi4_oif == 0 &&
(ipv4_is_multicast(fl4->daddr) ||
ipv4_is_lbcast(fl4->daddr))) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
dev_out = __ip_dev_find(net, fl4->saddr, false);
if (!dev_out)
goto out;
/* Special hack: user can direct multicasts
* and limited broadcast via necessary interface
* without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
* This hack is not just for fun, it allows
* vic,vat and friends to work.
* They bind socket to loopback, set ttl to zero
* and expect that it will work.
* From the viewpoint of routing cache they are broken,
* because we are not allowed to build multicast path
* with loopback source addr (look, routing cache
* cannot know, that ttl is zero, so that packet
* will not leave this host and route is valid).
			 * Luckily, this hack is a good workaround.
*/
fl4->flowi4_oif = dev_out->ifindex;
goto make_route;
}
if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
if (!__ip_dev_find(net, fl4->saddr, false))
goto out;
}
}
if (fl4->flowi4_oif) {
dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
rth = ERR_PTR(-ENODEV);
if (!dev_out)
goto out;
/* RACE: Check return value of inet_select_addr instead. */
if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
rth = ERR_PTR(-ENETUNREACH);
goto out;
}
if (ipv4_is_local_multicast(fl4->daddr) ||
ipv4_is_lbcast(fl4->daddr) ||
fl4->flowi4_proto == IPPROTO_IGMP) {
if (!fl4->saddr)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
goto make_route;
}
if (!fl4->saddr) {
if (ipv4_is_multicast(fl4->daddr))
fl4->saddr = inet_select_addr(dev_out, 0,
fl4->flowi4_scope);
else if (!fl4->daddr)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_HOST);
}
}
if (!fl4->daddr) {
fl4->daddr = fl4->saddr;
if (!fl4->daddr)
fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
dev_out = net->loopback_dev;
fl4->flowi4_oif = LOOPBACK_IFINDEX;
res->type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
}
err = fib_lookup(net, fl4, res, 0);
if (err) {
res->fi = NULL;
res->table = NULL;
if (fl4->flowi4_oif &&
(ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
			/* Apparently, routing tables are wrong. Assume
			 * that the destination is on-link.
*
* WHY? DW.
* Because we are allowed to send to iface
* even if it has NO routes and NO assigned
* addresses. When oif is specified, routing
* tables are looked up with only one purpose:
* to catch if destination is gatewayed, rather than
* direct. Moreover, if MSG_DONTROUTE is set,
* we send packet, ignoring both routing tables
* and ifaddr state. --ANK
*
*
* We could make it even if oif is unknown,
* likely IPv6, but we do not.
*/
if (fl4->saddr == 0)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
res->type = RTN_UNICAST;
goto make_route;
}
rth = ERR_PTR(err);
goto out;
}
if (res->type == RTN_LOCAL) {
if (!fl4->saddr) {
if (res->fi->fib_prefsrc)
fl4->saddr = res->fi->fib_prefsrc;
else
fl4->saddr = fl4->daddr;
}
/* L3 master device is the loopback for that domain */
dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
net->loopback_dev;
/* make sure orig_oif points to fib result device even
* though packet rx/tx happens over loopback or l3mdev
*/
orig_oif = FIB_RES_OIF(*res);
fl4->flowi4_oif = dev_out->ifindex;
flags |= RTCF_LOCAL;
goto make_route;
}
fib_select_path(net, res, fl4, skb);
dev_out = FIB_RES_DEV(*res);
make_route:
rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
out:
return rth;
}
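/* Blackhole routes keep the metadata of an existing route (pmtu, gateway,
 * flags, ...) but never carry traffic: input and output are wired to the
 * dst_discard helpers below, so every packet using them is dropped.
 */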
static struct dst_ops ipv4_dst_blackhole_ops = {
.family = AF_INET,
.default_advmss = ipv4_default_advmss,
.neigh_lookup = ipv4_neigh_lookup,
.check = dst_blackhole_check,
.cow_metrics = dst_blackhole_cow_metrics,
.update_pmtu = dst_blackhole_update_pmtu,
.redirect = dst_blackhole_redirect,
.mtu = dst_blackhole_mtu,
};
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
struct rtable *ort = (struct rtable *) dst_orig;
struct rtable *rt;
rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
if (rt) {
struct dst_entry *new = &rt->dst;
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard_out;
new->dev = net->loopback_dev;
netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC);
rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
rt->rt_pmtu = ort->rt_pmtu;
rt->rt_mtu_locked = ort->rt_mtu_locked;
rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
rt->rt_uses_gateway = ort->rt_uses_gateway;
rt->rt_gw_family = ort->rt_gw_family;
if (rt->rt_gw_family == AF_INET)
rt->rt_gw4 = ort->rt_gw4;
else if (rt->rt_gw_family == AF_INET6)
rt->rt_gw6 = ort->rt_gw6;
}
dst_release(dst_orig);
return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
const struct sock *sk)
{
struct rtable *rt = __ip_route_output_key(net, flp4);
if (IS_ERR(rt))
return rt;
if (flp4->flowi4_proto) {
flp4->flowi4_oif = rt->dst.dev->ifindex;
rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
flowi4_to_flowi(flp4),
sk, 0);
}
return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
struct net_device *dev,
struct net *net, __be32 *saddr,
const struct ip_tunnel_info *info,
u8 protocol, bool use_cache)
{
#ifdef CONFIG_DST_CACHE
struct dst_cache *dst_cache;
#endif
struct rtable *rt = NULL;
struct flowi4 fl4;
__u8 tos;
#ifdef CONFIG_DST_CACHE
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
rt = dst_cache_get_ip4(dst_cache, saddr);
if (rt)
return rt;
}
#endif
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_mark = skb->mark;
fl4.flowi4_proto = protocol;
fl4.daddr = info->key.u.ipv4.dst;
fl4.saddr = info->key.u.ipv4.src;
tos = info->key.tos;
fl4.flowi4_tos = RT_TOS(tos);
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
return ERR_PTR(-ENETUNREACH);
}
if (rt->dst.dev == dev) { /* is this necessary? */
netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
ip_rt_put(rt);
return ERR_PTR(-ELOOP);
}
#ifdef CONFIG_DST_CACHE
if (use_cache)
dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
#endif
*saddr = fl4.saddr;
return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_tunnel);
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
struct rtable *rt, u32 table_id, struct flowi4 *fl4,
struct sk_buff *skb, u32 portid, u32 seq,
unsigned int flags)
{
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned long expires = 0;
u32 error;
u32 metrics[RTAX_MAX];
nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
if (!nlh)
return -EMSGSIZE;
r = nlmsg_data(nlh);
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table_id))
goto nla_put_failure;
r->rtm_type = rt->rt_type;
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = RTPROT_UNSPEC;
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
r->rtm_flags |= RTCF_DOREDIRECT;
if (nla_put_in_addr(skb, RTA_DST, dst))
goto nla_put_failure;
if (src) {
r->rtm_src_len = 32;
if (nla_put_in_addr(skb, RTA_SRC, src))
goto nla_put_failure;
}
if (rt->dst.dev &&
nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
goto nla_put_failure;
if (rt->dst.lwtstate &&
lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (rt->dst.tclassid &&
nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
goto nla_put_failure;
#endif
if (fl4 && !rt_is_input_route(rt) &&
fl4->saddr != src) {
if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
goto nla_put_failure;
}
if (rt->rt_uses_gateway) {
if (rt->rt_gw_family == AF_INET &&
nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
goto nla_put_failure;
} else if (rt->rt_gw_family == AF_INET6) {
int alen = sizeof(struct in6_addr);
struct nlattr *nla;
struct rtvia *via;
nla = nla_reserve(skb, RTA_VIA, alen + 2);
if (!nla)
goto nla_put_failure;
via = nla_data(nla);
via->rtvia_family = AF_INET6;
memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
}
}
expires = rt->dst.expires;
if (expires) {
unsigned long now = jiffies;
if (time_before(now, expires))
expires -= now;
else
expires = 0;
}
memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
if (rt->rt_pmtu && expires)
metrics[RTAX_MTU - 1] = rt->rt_pmtu;
if (rt->rt_mtu_locked && expires)
metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;
if (fl4) {
if (fl4->flowi4_mark &&
nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
goto nla_put_failure;
if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
nla_put_u32(skb, RTA_UID,
from_kuid_munged(current_user_ns(),
fl4->flowi4_uid)))
goto nla_put_failure;
if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
if (ipv4_is_multicast(dst) &&
!ipv4_is_local_multicast(dst) &&
IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
int err = ipmr_get_route(net, skb,
fl4->saddr, fl4->daddr,
r, portid);
if (err <= 0) {
if (err == 0)
return 0;
goto nla_put_failure;
}
} else
#endif
if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
goto nla_put_failure;
}
}
error = rt->dst.error;
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
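/* Walk one hash bucket of next-hop exceptions and emit an RTM_NEWROUTE
 * entry for every live exception (current genid, not expired) that still
 * has a cached input or output route attached.
 */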
static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
struct netlink_callback *cb, u32 table_id,
struct fnhe_hash_bucket *bucket, int genid,
int *fa_index, int fa_start, unsigned int flags)
{
int i;
for (i = 0; i < FNHE_HASH_SIZE; i++) {
struct fib_nh_exception *fnhe;
for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
fnhe = rcu_dereference(fnhe->fnhe_next)) {
struct rtable *rt;
int err;
if (*fa_index < fa_start)
goto next;
if (fnhe->fnhe_genid != genid)
goto next;
if (fnhe->fnhe_expires &&
time_after(jiffies, fnhe->fnhe_expires))
goto next;
rt = rcu_dereference(fnhe->fnhe_rth_input);
if (!rt)
rt = rcu_dereference(fnhe->fnhe_rth_output);
if (!rt)
goto next;
err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
table_id, NULL, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, flags);
if (err)
return err;
next:
(*fa_index)++;
}
}
return 0;
}
int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
u32 table_id, struct fib_info *fi,
int *fa_index, int fa_start, unsigned int flags)
{
struct net *net = sock_net(cb->skb->sk);
int nhsel, genid = fnhe_genid(net);
for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
struct fnhe_hash_bucket *bucket;
int err;
if (nhc->nhc_flags & RTNH_F_DEAD)
continue;
rcu_read_lock();
bucket = rcu_dereference(nhc->nhc_exceptions);
err = 0;
if (bucket)
err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
genid, fa_index, fa_start,
flags);
rcu_read_unlock();
if (err)
return err;
}
return 0;
}
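/* Build a dummy skb with minimal IPv4 + L4 headers for an RTM_GETROUTE
 * request, so the lookup performed later sees a packet carrying the
 * requested protocol and ports (e.g. for multipath hash computation).
 */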
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
u8 ip_proto, __be16 sport,
__be16 dport)
{
struct sk_buff *skb;
struct iphdr *iph;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return NULL;
/* Reserve room for dummy headers, this skb can pass
* through good chunk of routing engine.
*/
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IP);
iph = skb_put(skb, sizeof(struct iphdr));
iph->protocol = ip_proto;
iph->saddr = src;
iph->daddr = dst;
iph->version = 0x4;
iph->frag_off = 0;
iph->ihl = 0x5;
skb_set_transport_header(skb, skb->len);
switch (iph->protocol) {
case IPPROTO_UDP: {
struct udphdr *udph;
udph = skb_put_zero(skb, sizeof(struct udphdr));
udph->source = sport;
udph->dest = dport;
udph->len = htons(sizeof(struct udphdr));
udph->check = 0;
break;
}
case IPPROTO_TCP: {
struct tcphdr *tcph;
tcph = skb_put_zero(skb, sizeof(struct tcphdr));
tcph->source = sport;
tcph->dest = dport;
tcph->doff = sizeof(struct tcphdr) / 4;
tcph->rst = 1;
tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
src, dst, 0);
break;
}
case IPPROTO_ICMP: {
struct icmphdr *icmph;
icmph = skb_put_zero(skb, sizeof(struct icmphdr));
icmph->type = ICMP_ECHO;
icmph->code = 0;
}
}
return skb;
}
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
int i, err;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
NL_SET_ERR_MSG(extack,
"ipv4: Invalid header for route get request");
return -EINVAL;
}
if (!netlink_strict_get_check(skb))
return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_ipv4_policy, extack);
rtm = nlmsg_data(nlh);
if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
(rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
rtm->rtm_table || rtm->rtm_protocol ||
rtm->rtm_scope || rtm->rtm_type) {
NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
return -EINVAL;
}
if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
RTM_F_LOOKUP_TABLE |
RTM_F_FIB_MATCH)) {
NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
return -EINVAL;
}
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_ipv4_policy, extack);
if (err)
return err;
if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
(tb[RTA_DST] && !rtm->rtm_dst_len)) {
NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
return -EINVAL;
}
for (i = 0; i <= RTA_MAX; i++) {
if (!tb[i])
continue;
switch (i) {
case RTA_IIF:
case RTA_OIF:
case RTA_SRC:
case RTA_DST:
case RTA_IP_PROTO:
case RTA_SPORT:
case RTA_DPORT:
case RTA_MARK:
case RTA_UID:
break;
default:
NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
return -EINVAL;
}
}
return 0;
}
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[RTA_MAX+1];
u32 table_id = RT_TABLE_MAIN;
__be16 sport = 0, dport = 0;
struct fib_result res = {};
u8 ip_proto = IPPROTO_UDP;
struct rtable *rt = NULL;
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi4 fl4 = {};
__be32 dst = 0;
__be32 src = 0;
kuid_t uid;
u32 iif;
int err;
int mark;
err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
rtm = nlmsg_data(nlh);
src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
if (tb[RTA_UID])
uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
else
uid = (iif ? INVALID_UID : current_uid());
if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
&ip_proto, AF_INET, extack);
if (err)
return err;
}
if (tb[RTA_SPORT])
sport = nla_get_be16(tb[RTA_SPORT]);
if (tb[RTA_DPORT])
dport = nla_get_be16(tb[RTA_DPORT]);
skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
if (!skb)
return -ENOBUFS;
fl4.daddr = dst;
fl4.saddr = src;
fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = uid;
if (sport)
fl4.fl4_sport = sport;
if (dport)
fl4.fl4_dport = dport;
fl4.flowi4_proto = ip_proto;
rcu_read_lock();
if (iif) {
struct net_device *dev;
dev = dev_get_by_index_rcu(net, iif);
if (!dev) {
err = -ENODEV;
goto errout_rcu;
}
fl4.flowi4_iif = iif; /* for rt_fill_info */
skb->dev = dev;
skb->mark = mark;
err = ip_route_input_rcu(skb, dst, src,
rtm->rtm_tos & IPTOS_RT_MASK, dev,
&res);
rt = skb_rtable(skb);
if (err == 0 && rt->dst.error)
err = -rt->dst.error;
} else {
fl4.flowi4_iif = LOOPBACK_IFINDEX;
skb->dev = net->loopback_dev;
rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
err = 0;
if (IS_ERR(rt))
err = PTR_ERR(rt);
else
skb_dst_set(skb, &rt->dst);
}
if (err)
goto errout_rcu;
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
table_id = res.table ? res.table->tb_id : 0;
/* reset skb for netlink reply msg */
skb_trim(skb, 0);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb_reset_mac_header(skb);
if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
struct fib_rt_info fri;
if (!res.fi) {
err = fib_props[res.type].error;
if (!err)
err = -EHOSTUNREACH;
goto errout_rcu;
}
fri.fi = res.fi;
fri.tb_id = table_id;
fri.dst = res.prefix;
fri.dst_len = res.prefixlen;
fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
fri.type = rt->rt_type;
fri.offload = 0;
fri.trap = 0;
fri.offload_failed = 0;
if (res.fa_head) {
struct fib_alias *fa;
hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
u8 slen = 32 - fri.dst_len;
if (fa->fa_slen == slen &&
fa->tb_id == fri.tb_id &&
fa->fa_dscp == fri.dscp &&
fa->fa_info == res.fi &&
fa->fa_type == fri.type) {
fri.offload = READ_ONCE(fa->offload);
fri.trap = READ_ONCE(fa->trap);
break;
}
}
}
err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
} else {
err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0);
}
if (err < 0)
goto errout_rcu;
rcu_read_unlock();
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout_free:
return err;
errout_rcu:
rcu_read_unlock();
kfree_skb(skb);
goto errout_free;
}
void ip_rt_multicast_event(struct in_device *in_dev)
{
rt_cache_flush(dev_net(in_dev->dev));
}
#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)__ctl->extra1;
if (write) {
rt_cache_flush(net);
fnhe_genid_bump(net);
return 0;
}
return -EINVAL;
}
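/* Illustrative usage (not part of the original file): the write-only
 * "flush" entry registered under net/ipv4/route lets an administrator
 * invalidate cached routes and next-hop exceptions, e.g.
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 */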
static struct ctl_table ipv4_route_table[] = {
{
.procname = "gc_thresh",
.data = &ipv4_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "max_size",
.data = &ip_rt_max_size,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
/* Deprecated. Use gc_min_interval_ms */
.procname = "gc_min_interval",
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_min_interval_ms",
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
{
.procname = "gc_timeout",
.data = &ip_rt_gc_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_interval",
.data = &ip_rt_gc_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "redirect_load",
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "redirect_number",
.data = &ip_rt_redirect_number,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "redirect_silence",
.data = &ip_rt_redirect_silence,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "error_cost",
.data = &ip_rt_error_cost,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "error_burst",
.data = &ip_rt_error_burst,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "gc_elasticity",
.data = &ip_rt_gc_elasticity,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static const char ipv4_route_flush_procname[] = "flush";
static struct ctl_table ipv4_route_netns_table[] = {
{
.procname = ipv4_route_flush_procname,
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
},
{
.procname = "min_pmtu",
.data = &init_net.ipv4.ip_rt_min_pmtu,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &ip_min_valid_pmtu,
},
{
.procname = "mtu_expires",
.data = &init_net.ipv4.ip_rt_mtu_expires,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "min_adv_mss",
.data = &init_net.ipv4.ip_rt_min_advmss,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ },
};
static __net_init int sysctl_route_net_init(struct net *net)
{
struct ctl_table *tbl;
size_t table_size = ARRAY_SIZE(ipv4_route_netns_table);
tbl = ipv4_route_netns_table;
if (!net_eq(net, &init_net)) {
int i;
tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
if (!tbl)
goto err_dup;
/* Don't export non-whitelisted sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
if (tbl[0].procname != ipv4_route_flush_procname) {
tbl[0].procname = NULL;
table_size = 0;
}
}
/* Update the variables to point into the current struct net
* except for the first element flush
*/
for (i = 1; i < ARRAY_SIZE(ipv4_route_netns_table) - 1; i++)
tbl[i].data += (void *)net - (void *)&init_net;
}
tbl[0].extra1 = net;
net->ipv4.route_hdr = register_net_sysctl_sz(net, "net/ipv4/route",
tbl, table_size);
if (!net->ipv4.route_hdr)
goto err_reg;
return 0;
err_reg:
if (tbl != ipv4_route_netns_table)
kfree(tbl);
err_dup:
return -ENOMEM;
}
static __net_exit void sysctl_route_net_exit(struct net *net)
{
struct ctl_table *tbl;
tbl = net->ipv4.route_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.route_hdr);
BUG_ON(tbl == ipv4_route_netns_table);
kfree(tbl);
}
static __net_initdata struct pernet_operations sysctl_route_ops = {
.init = sysctl_route_net_init,
.exit = sysctl_route_net_exit,
};
#endif
static __net_init int netns_ip_rt_init(struct net *net)
{
/* Set default value for namespaceified sysctls */
net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
return 0;
}
static struct pernet_operations __net_initdata ip_rt_ops = {
.init = netns_ip_rt_init,
};
static __net_init int rt_genid_init(struct net *net)
{
atomic_set(&net->ipv4.rt_genid, 0);
atomic_set(&net->fnhe_genid, 0);
atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());
return 0;
}
static __net_initdata struct pernet_operations rt_genid_ops = {
.init = rt_genid_init,
};
static int __net_init ipv4_inetpeer_init(struct net *net)
{
struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
if (!bp)
return -ENOMEM;
inet_peer_base_init(bp);
net->ipv4.peers = bp;
return 0;
}
static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
struct inet_peer_base *bp = net->ipv4.peers;
net->ipv4.peers = NULL;
inetpeer_invalidate_tree(bp);
kfree(bp);
}
static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
.init = ipv4_inetpeer_init,
.exit = ipv4_inetpeer_exit,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
void *idents_hash;
int cpu;
/* For modern hosts, this will use 2 MB of memory */
idents_hash = alloc_large_system_hash("IP idents",
sizeof(*ip_idents) + sizeof(*ip_tstamps),
0,
16, /* one bucket per 64 KB */
HASH_ZERO,
NULL,
&ip_idents_mask,
2048,
256*1024);
ip_idents = idents_hash;
get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
for_each_possible_cpu(cpu) {
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
INIT_LIST_HEAD(&ul->quarantine);
spin_lock_init(&ul->lock);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
if (!ip_rt_acct)
panic("IP: failed to allocate ip_rt_acct\n");
#endif
ipv4_dst_ops.kmem_cachep =
kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
if (dst_entries_init(&ipv4_dst_ops) < 0)
panic("IP: failed to allocate ipv4_dst_ops counter\n");
if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
ipv4_dst_ops.gc_thresh = ~0;
ip_rt_max_size = INT_MAX;
devinet_init();
ip_fib_init();
if (ip_rt_proc_init())
pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
xfrm4_init();
#endif
rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
RTNL_FLAG_DOIT_UNLOCKED);
#ifdef CONFIG_SYSCTL
register_pernet_subsys(&sysctl_route_ops);
#endif
register_pernet_subsys(&ip_rt_ops);
register_pernet_subsys(&rt_genid_ops);
register_pernet_subsys(&ipv4_inetpeer_ops);
return 0;
}
#ifdef CONFIG_SYSCTL
/*
* We really need to sanitize the damn ipv4 init order, then all
* this nonsense will go away.
*/
void __init ip_static_sysctl_init(void)
{
register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif
| linux-master | net/ipv4/route.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* inet fragments management
*
* Authors: Pavel Emelyanov <[email protected]>
* Started as consolidation of ipv4/ip_fragment.c,
* ipv6/reassembly. and ipv6 nf conntrack reassembly
*/
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>
#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>
/* Use skb->cb to track consecutive/adjacent fragments coming at
* the end of the queue. Nodes in the rb-tree queue will
* contain "runs" of one or more adjacent fragments.
*
* Invariants:
* - next_frag is NULL at the tail of a "run";
* - the head of a "run" has the sum of all fragment lengths in frag_run_len.
*/
struct ipfrag_skb_cb {
union {
struct inet_skb_parm h4;
struct inet6_skb_parm h6;
};
struct sk_buff *next_frag;
int frag_run_len;
};
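/* Illustrative sketch (not part of the original file): with 1400-byte
 * fragments received at offsets 0, 1400 and 2800, followed by one at 7000,
 * the queue holds two runs and only the run heads sit in the rb-tree:
 *
 *	rb_fragments:	[head @0, frag_run_len 4200] -> @1400 -> @2800
 *			[head @7000, frag_run_len 1400]
 *
 * next_frag chains the members of a run and is NULL at the tail.
 */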
#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
static void fragcb_clear(struct sk_buff *skb)
{
RB_CLEAR_NODE(&skb->rbnode);
FRAG_CB(skb)->next_frag = NULL;
FRAG_CB(skb)->frag_run_len = skb->len;
}
/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
struct sk_buff *skb)
{
fragcb_clear(skb);
FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
FRAG_CB(q->fragments_tail)->next_frag = skb;
q->fragments_tail = skb;
}
/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
fragcb_clear(skb);
if (q->last_run_head)
rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
&q->last_run_head->rbnode.rb_right);
else
rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
rb_insert_color(&skb->rbnode, &q->rb_fragments);
q->fragments_tail = skb;
q->last_run_head = skb;
}
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
* Value : 0xff if frame should be dropped.
* 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
*/
const u8 ip_frag_ecn_table[16] = {
/* at least one fragment had CE, and others ECT_0 or ECT_1 */
[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
/* invalid combinations : drop frame */
[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
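/* Worked example (not part of the original file): if every fragment was
 * ECT(0), the index IPFRAG_ECN_ECT_0 maps to 0 and the reassembled tos is
 * left untouched; if one fragment also carried CE, the index
 * IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 maps to INET_ECN_CE, which is ORed into
 * the final header; any mix of NOT_ECT with CE maps to 0xff and the frame
 * is dropped.
 */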
int inet_frags_init(struct inet_frags *f)
{
f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
NULL);
if (!f->frags_cachep)
return -ENOMEM;
refcount_set(&f->refcnt, 1);
init_completion(&f->completion);
return 0;
}
EXPORT_SYMBOL(inet_frags_init);
void inet_frags_fini(struct inet_frags *f)
{
if (refcount_dec_and_test(&f->refcnt))
complete(&f->completion);
wait_for_completion(&f->completion);
kmem_cache_destroy(f->frags_cachep);
f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);
/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
struct inet_frag_queue *fq = ptr;
int count;
count = del_timer_sync(&fq->timer) ? 1 : 0;
spin_lock_bh(&fq->lock);
fq->flags |= INET_FRAG_DROP;
if (!(fq->flags & INET_FRAG_COMPLETE)) {
fq->flags |= INET_FRAG_COMPLETE;
count++;
} else if (fq->flags & INET_FRAG_HASH_DEAD) {
count++;
}
spin_unlock_bh(&fq->lock);
if (refcount_sub_and_test(count, &fq->refcnt))
inet_frag_destroy(fq);
}
static LLIST_HEAD(fqdir_free_list);
static void fqdir_free_fn(struct work_struct *work)
{
struct llist_node *kill_list;
struct fqdir *fqdir, *tmp;
struct inet_frags *f;
/* Atomically snapshot the list of fqdirs to free */
kill_list = llist_del_all(&fqdir_free_list);
/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
* have completed, since they need to dereference fqdir.
* Would it not be nice to have kfree_rcu_barrier() ? :)
*/
rcu_barrier();
llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
f = fqdir->f;
if (refcount_dec_and_test(&f->refcnt))
complete(&f->completion);
kfree(fqdir);
}
}
static DECLARE_WORK(fqdir_free_work, fqdir_free_fn);
static void fqdir_work_fn(struct work_struct *work)
{
struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
if (llist_add(&fqdir->free_list, &fqdir_free_list))
queue_work(system_wq, &fqdir_free_work);
}
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
int res;
if (!fqdir)
return -ENOMEM;
fqdir->f = f;
fqdir->net = net;
res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
if (res < 0) {
kfree(fqdir);
return res;
}
refcount_inc(&f->refcnt);
*fqdirp = fqdir;
return 0;
}
EXPORT_SYMBOL(fqdir_init);
static struct workqueue_struct *inet_frag_wq;
static int __init inet_frag_wq_init(void)
{
inet_frag_wq = create_workqueue("inet_frag_wq");
if (!inet_frag_wq)
panic("Could not create inet frag workq");
return 0;
}
pure_initcall(inet_frag_wq_init);
void fqdir_exit(struct fqdir *fqdir)
{
INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
queue_work(inet_frag_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);
void inet_frag_kill(struct inet_frag_queue *fq)
{
if (del_timer(&fq->timer))
refcount_dec(&fq->refcnt);
if (!(fq->flags & INET_FRAG_COMPLETE)) {
struct fqdir *fqdir = fq->fqdir;
fq->flags |= INET_FRAG_COMPLETE;
rcu_read_lock();
/* The RCU read lock provides a memory barrier
* guaranteeing that if fqdir->dead is false then
* the hash table destruction will not start until
* after we unlock. Paired with fqdir_pre_exit().
*/
if (!READ_ONCE(fqdir->dead)) {
rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
fqdir->f->rhash_params);
refcount_dec(&fq->refcnt);
} else {
fq->flags |= INET_FRAG_HASH_DEAD;
}
rcu_read_unlock();
}
}
EXPORT_SYMBOL(inet_frag_kill);
static void inet_frag_destroy_rcu(struct rcu_head *head)
{
struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
rcu);
struct inet_frags *f = q->fqdir->f;
if (f->destructor)
f->destructor(q);
kmem_cache_free(f->frags_cachep, q);
}
unsigned int inet_frag_rbtree_purge(struct rb_root *root,
enum skb_drop_reason reason)
{
struct rb_node *p = rb_first(root);
unsigned int sum = 0;
while (p) {
struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
p = rb_next(p);
rb_erase(&skb->rbnode, root);
while (skb) {
struct sk_buff *next = FRAG_CB(skb)->next_frag;
sum += skb->truesize;
kfree_skb_reason(skb, reason);
skb = next;
}
}
return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);
void inet_frag_destroy(struct inet_frag_queue *q)
{
unsigned int sum, sum_truesize = 0;
enum skb_drop_reason reason;
struct inet_frags *f;
struct fqdir *fqdir;
WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
reason = (q->flags & INET_FRAG_DROP) ?
SKB_DROP_REASON_FRAG_REASM_TIMEOUT :
SKB_CONSUMED;
WARN_ON(del_timer(&q->timer) != 0);
/* Release all fragment data. */
fqdir = q->fqdir;
f = fqdir->f;
sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments, reason);
sum = sum_truesize + f->qsize;
call_rcu(&q->rcu, inet_frag_destroy_rcu);
sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);
static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
struct inet_frags *f,
void *arg)
{
struct inet_frag_queue *q;
q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
if (!q)
return NULL;
q->fqdir = fqdir;
f->constructor(q, arg);
add_frag_mem_limit(fqdir, f->qsize);
timer_setup(&q->timer, f->frag_expire, 0);
spin_lock_init(&q->lock);
refcount_set(&q->refcnt, 3);
return q;
}
static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
void *arg,
struct inet_frag_queue **prev)
{
struct inet_frags *f = fqdir->f;
struct inet_frag_queue *q;
q = inet_frag_alloc(fqdir, f, arg);
if (!q) {
*prev = ERR_PTR(-ENOMEM);
return NULL;
}
mod_timer(&q->timer, jiffies + fqdir->timeout);
*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
&q->node, f->rhash_params);
if (*prev) {
q->flags |= INET_FRAG_COMPLETE;
inet_frag_kill(q);
inet_frag_destroy(q);
return NULL;
}
return q;
}
/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
/* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
long high_thresh = READ_ONCE(fqdir->high_thresh);
struct inet_frag_queue *fq = NULL, *prev;
if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
return NULL;
rcu_read_lock();
prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
if (!prev)
fq = inet_frag_create(fqdir, key, &prev);
if (!IS_ERR_OR_NULL(prev)) {
fq = prev;
if (!refcount_inc_not_zero(&fq->refcnt))
fq = NULL;
}
rcu_read_unlock();
return fq;
}
EXPORT_SYMBOL(inet_frag_find);
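/* Insert @skb, covering [offset, end), into the queue's rb-tree of runs.
 * Returns IPFRAG_OK on success, IPFRAG_DUP when the data is already fully
 * covered (the caller simply drops the duplicate), or IPFRAG_OVERLAP on a
 * partial overlap (the caller discards the whole queue, see the RFC 5722
 * note below).
 */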
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
int offset, int end)
{
struct sk_buff *last = q->fragments_tail;
/* RFC5722, Section 4, amended by Errata ID : 3089
* When reassembling an IPv6 datagram, if
	 * one or more of its constituent fragments is determined to be an
* overlapping fragment, the entire datagram (and any constituent
* fragments) MUST be silently discarded.
*
* Duplicates, however, should be ignored (i.e. skb dropped, but the
* queue/fragments kept for later reassembly).
*/
if (!last)
fragrun_create(q, skb); /* First fragment. */
else if (last->ip_defrag_offset + last->len < end) {
/* This is the common case: skb goes to the end. */
/* Detect and discard overlaps. */
if (offset < last->ip_defrag_offset + last->len)
return IPFRAG_OVERLAP;
if (offset == last->ip_defrag_offset + last->len)
fragrun_append_to_last(q, skb);
else
fragrun_create(q, skb);
} else {
/* Binary search. Note that skb can become the first fragment,
* but not the last (covered above).
*/
struct rb_node **rbn, *parent;
rbn = &q->rb_fragments.rb_node;
do {
struct sk_buff *curr;
int curr_run_end;
parent = *rbn;
curr = rb_to_skb(parent);
curr_run_end = curr->ip_defrag_offset +
FRAG_CB(curr)->frag_run_len;
if (end <= curr->ip_defrag_offset)
rbn = &parent->rb_left;
else if (offset >= curr_run_end)
rbn = &parent->rb_right;
else if (offset >= curr->ip_defrag_offset &&
end <= curr_run_end)
return IPFRAG_DUP;
else
return IPFRAG_OVERLAP;
} while (*rbn);
/* Here we have parent properly set, and rbn pointing to
* one of its NULL left/right children. Insert skb.
*/
fragcb_clear(skb);
rb_link_node(&skb->rbnode, parent, rbn);
rb_insert_color(&skb->rbnode, &q->rb_fragments);
}
skb->ip_defrag_offset = offset;
return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
struct sk_buff *parent)
{
struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
struct sk_buff **nextp;
int delta;
if (head != skb) {
fp = skb_clone(skb, GFP_ATOMIC);
if (!fp)
return NULL;
FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
if (RB_EMPTY_NODE(&skb->rbnode))
FRAG_CB(parent)->next_frag = fp;
else
rb_replace_node(&skb->rbnode, &fp->rbnode,
&q->rb_fragments);
if (q->fragments_tail == skb)
q->fragments_tail = fp;
skb_morph(skb, head);
FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
rb_replace_node(&head->rbnode, &skb->rbnode,
&q->rb_fragments);
consume_skb(head);
head = skb;
}
WARN_ON(head->ip_defrag_offset != 0);
delta = -head->truesize;
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
return NULL;
delta += head->truesize;
if (delta)
add_frag_mem_limit(q->fqdir, delta);
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments.
*/
if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
return NULL;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->data_len = head->data_len - plen;
clone->len = clone->data_len;
head->truesize += clone->truesize;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
add_frag_mem_limit(q->fqdir, clone->truesize);
skb_shinfo(head)->frag_list = clone;
nextp = &clone->next;
} else {
nextp = &skb_shinfo(head)->frag_list;
}
return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
void *reasm_data, bool try_coalesce)
{
struct sk_buff **nextp = reasm_data;
struct rb_node *rbn;
struct sk_buff *fp;
int sum_truesize;
skb_push(head, head->data - skb_network_header(head));
/* Traverse the tree in order, to build frag_list. */
fp = FRAG_CB(head)->next_frag;
rbn = rb_next(&head->rbnode);
rb_erase(&head->rbnode, &q->rb_fragments);
sum_truesize = head->truesize;
while (rbn || fp) {
/* fp points to the next sk_buff in the current run;
* rbn points to the next run.
*/
/* Go through the current run. */
while (fp) {
struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
bool stolen;
int delta;
sum_truesize += fp->truesize;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
&delta)) {
kfree_skb_partial(fp, stolen);
} else {
fp->prev = NULL;
memset(&fp->rbnode, 0, sizeof(fp->rbnode));
fp->sk = NULL;
head->data_len += fp->len;
head->len += fp->len;
head->truesize += fp->truesize;
*nextp = fp;
nextp = &fp->next;
}
fp = next_frag;
}
/* Move to the next run. */
if (rbn) {
struct rb_node *rbnext = rb_next(rbn);
fp = rb_to_skb(rbn);
rb_erase(rbn, &q->rb_fragments);
rbn = rbnext;
}
}
sub_frag_mem_limit(q->fqdir, sum_truesize);
*nextp = NULL;
skb_mark_not_on_list(head);
head->prev = NULL;
head->tstamp = q->stamp;
head->mono_delivery_time = q->mono_delivery_time;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
struct sk_buff *head, *skb;
head = skb_rb_first(&q->rb_fragments);
if (!head)
return NULL;
skb = FRAG_CB(head)->next_frag;
if (skb)
rb_replace_node(&head->rbnode, &skb->rbnode,
&q->rb_fragments);
else
rb_erase(&head->rbnode, &q->rb_fragments);
memset(&head->rbnode, 0, sizeof(head->rbnode));
barrier();
if (head == q->fragments_tail)
q->fragments_tail = NULL;
sub_frag_mem_limit(q->fqdir, head->truesize);
return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);
| linux-master | net/ipv4/inet_fragment.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Tom Kelly's Scalable TCP
*
* See http://www.deneholme.net/tom/scalable/
*
* John Heffner <[email protected]>
*/
#include <linux/module.h>
#include <net/tcp.h>
/* These factors are derived from the recommended values in the paper:
* .01 and 7/8.
*/
#define TCP_SCALABLE_AI_CNT 100U
#define TCP_SCALABLE_MD_SCALE 3
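/* With these constants cwnd grows by one segment roughly every 100 ACKed
 * segments (a = 0.01), and on loss ssthresh becomes
 * cwnd - (cwnd >> 3) = 7/8 * cwnd (b = 1/8).
 */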
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
if (!tcp_is_cwnd_limited(sk))
return;
if (tcp_in_slow_start(tp)) {
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
}
tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
acked);
}
static u32 tcp_scalable_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
return max(tcp_snd_cwnd(tp) - (tcp_snd_cwnd(tp)>>TCP_SCALABLE_MD_SCALE), 2U);
}
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
.ssthresh = tcp_scalable_ssthresh,
.undo_cwnd = tcp_reno_undo_cwnd,
.cong_avoid = tcp_scalable_cong_avoid,
.owner = THIS_MODULE,
.name = "scalable",
};
static int __init tcp_scalable_register(void)
{
return tcp_register_congestion_control(&tcp_scalable);
}
static void __exit tcp_scalable_unregister(void)
{
tcp_unregister_congestion_control(&tcp_scalable);
}
module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);
MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
| linux-master | net/ipv4/tcp_scalable.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IPV4 GSO/GRO offload support
* Linux INET implementation
*
* TCPv4 GSO/GRO support
*/
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
unsigned int seq, unsigned int mss)
{
while (skb) {
if (before(ts_seq, seq + mss)) {
skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
skb_shinfo(skb)->tskey = ts_seq;
return;
}
skb = skb->next;
seq += mss;
}
}
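/* IPv4 entry point for TCP segmentation offload: validate the gso_type and
 * TCP header, prime the pseudo-header checksum if the stack has not done it
 * already, then hand off to the protocol-independent tcp_gso_segment().
 */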
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
return ERR_PTR(-EINVAL);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
/* Set up checksum pseudo header, usually expect stack to
* have done this already.
*/
th->check = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
}
return tcp_gso_segment(skb, features);
}
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int sum_truesize = 0;
struct tcphdr *th;
unsigned int thlen;
unsigned int seq;
unsigned int oldlen;
unsigned int mss;
struct sk_buff *gso_skb = skb;
__sum16 newcheck;
bool ooo_okay, copy_destructor;
__wsum delta;
th = tcp_hdr(skb);
thlen = th->doff * 4;
if (thlen < sizeof(*th))
goto out;
if (!pskb_may_pull(skb, thlen))
goto out;
oldlen = ~skb->len;
__skb_pull(skb, thlen);
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
goto out;
}
copy_destructor = gso_skb->destructor == tcp_wfree;
ooo_okay = gso_skb->ooo_okay;
/* All segments but the first should have ooo_okay cleared */
skb->ooo_okay = 0;
segs = skb_segment(skb, features);
if (IS_ERR(segs))
goto out;
/* Only first segment might have ooo_okay set */
segs->ooo_okay = ooo_okay;
/* GSO partial and frag_list segmentation only requires splitting
* the frame into an MSS multiple and possibly a remainder, both
* cases return a GSO skb. So update the mss now.
*/
if (skb_is_gso(segs))
mss *= skb_shinfo(segs)->gso_segs;
delta = (__force __wsum)htonl(oldlen + thlen + mss);
skb = segs;
th = tcp_hdr(skb);
seq = ntohl(th->seq);
if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));
while (skb->next) {
th->fin = th->psh = 0;
th->check = newcheck;
if (skb->ip_summed == CHECKSUM_PARTIAL)
gso_reset_checksum(skb, ~th->check);
else
th->check = gso_make_checksum(skb, ~th->check);
seq += mss;
if (copy_destructor) {
skb->destructor = gso_skb->destructor;
skb->sk = gso_skb->sk;
sum_truesize += skb->truesize;
}
skb = skb->next;
th = tcp_hdr(skb);
th->seq = htonl(seq);
th->cwr = 0;
}
/* Following permits TCP Small Queues to work well with GSO :
* The callback to TCP stack will be called at the time last frag
* is freed at TX completion, and not right now when gso_skb
* is freed by GSO engine
*/
if (copy_destructor) {
int delta;
swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor);
sum_truesize += skb->truesize;
delta = sum_truesize - gso_skb->truesize;
/* In some pathological cases, delta can be negative.
* We need to either use refcount_add() or refcount_sub_and_test()
*/
if (likely(delta >= 0))
refcount_add(delta, &skb->sk->sk_wmem_alloc);
else
WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
}
delta = (__force __wsum)htonl(oldlen +
(skb_tail_pointer(skb) -
skb_transport_header(skb)) +
skb->data_len);
th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
if (skb->ip_summed == CHECKSUM_PARTIAL)
gso_reset_checksum(skb, ~th->check);
else
th->check = gso_make_checksum(skb, ~th->check);
out:
return segs;
}
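/* GRO receive for TCP: look for a held packet of the same flow on @head and
 * try to merge @skb into it. Returns the held packet when it has to be
 * flushed (e.g. header mismatch, a short segment, or PSH/FIN/RST seen),
 * otherwise NULL so aggregation can continue.
 */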
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
struct sk_buff *pp = NULL;
struct sk_buff *p;
struct tcphdr *th;
struct tcphdr *th2;
unsigned int len;
unsigned int thlen;
__be32 flags;
unsigned int mss = 1;
unsigned int hlen;
unsigned int off;
int flush = 1;
int i;
off = skb_gro_offset(skb);
hlen = off + sizeof(*th);
th = skb_gro_header(skb, hlen, off);
if (unlikely(!th))
goto out;
thlen = th->doff * 4;
if (thlen < sizeof(*th))
goto out;
hlen = off + thlen;
if (skb_gro_header_hard(skb, hlen)) {
th = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!th))
goto out;
}
skb_gro_pull(skb, thlen);
len = skb_gro_len(skb);
flags = tcp_flag_word(th);
list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
th2 = tcp_hdr(p);
if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
goto found;
}
p = NULL;
goto out_check_final;
found:
/* Include the IP ID check below from the inner most IP hdr */
flush = NAPI_GRO_CB(p)->flush;
flush |= (__force int)(flags & TCP_FLAG_CWR);
flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
for (i = sizeof(*th); i < thlen; i += 4)
flush |= *(u32 *)((u8 *)th + i) ^
*(u32 *)((u8 *)th2 + i);
/* When we receive our second frame we can made a decision on if we
* continue this flow as an atomic flow with a fixed ID or if we use
* an incrementing ID.
*/
if (NAPI_GRO_CB(p)->flush_id != 1 ||
NAPI_GRO_CB(p)->count != 1 ||
!NAPI_GRO_CB(p)->is_atomic)
flush |= NAPI_GRO_CB(p)->flush_id;
else
NAPI_GRO_CB(p)->is_atomic = false;
mss = skb_shinfo(p)->gso_size;
/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
* If it is a single frame, do not aggregate it if its length
* is bigger than our mss.
*/
if (unlikely(skb_is_gso(skb)))
flush |= (mss != skb_shinfo(skb)->gso_size);
else
flush |= (len - 1) >= mss;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
flush |= p->decrypted ^ skb->decrypted;
#endif
if (flush || skb_gro_receive(p, skb)) {
mss = 1;
goto out_check_final;
}
tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
out_check_final:
/* Force a flush if last segment is smaller than mss. */
if (unlikely(skb_is_gso(skb)))
flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
else
flush = len < mss;
flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
TCP_FLAG_RST | TCP_FLAG_SYN |
TCP_FLAG_FIN));
if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
pp = p;
out:
NAPI_GRO_CB(skb)->flush |= (flush != 0);
return pp;
}
void tcp_gro_complete(struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
if (skb->encapsulation)
skb->inner_transport_header = skb->transport_header;
}
EXPORT_SYMBOL(tcp_gro_complete);
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
skb_gro_checksum_validate(skb, IPPROTO_TCP,
inet_gro_compute_pseudo)) {
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
return tcp_gro_receive(head, skb);
}
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
if (NAPI_GRO_CB(skb)->is_atomic)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
tcp_gro_complete(skb);
return 0;
}
static const struct net_offload tcpv4_offload = {
.callbacks = {
.gso_segment = tcp4_gso_segment,
.gro_receive = tcp4_gro_receive,
.gro_complete = tcp4_gro_complete,
},
};
int __init tcpv4_offload_init(void)
{
return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
| linux-master | net/ipv4/tcp_offload.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux NET3: GRE over IP protocol decoder.
*
* Authors: Alexey Kuznetsov ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
Problems & solutions
--------------------
1. The most important issue is detecting local dead loops.
They would cause complete host lockup in transmit, which
would be "resolved" by stack overflow or, if queueing is enabled,
with infinite looping in net_bh.
   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.
Current solution: xmit_recursion breaks dead loops. This is a percpu
counter, since when we enter the first ndo_xmit(), cpu migration is
forbidden. We force an exit if this counter reaches RECURSION_LIMIT
2. Networking dead loops would not kill routers, but would really
kill network. IP hop limit plays role of "t->recursion" in this case,
if we copy it from packet being encapsulated to upper header.
It is very good solution, but it introduces two problems:
- Routing protocols, using packets with ttl=1 (OSPF, RIP2),
do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least in my neighbourhood)
     return only 8 bytes of payload. It is the end.
   Hence, if we want OSPF to work or traceroute to say something reasonable,
   we should search for another solution.
   One of them is to parse the packet, trying to detect inner encapsulation
   made by our node. It is difficult or even impossible, especially
   taking into account fragmentation. To be short, ttl is not a solution at all.
Current solution: The solution was UNEXPECTEDLY SIMPLE.
We force DF flag on tunnels with preconfigured hop limit,
that is ALL. :-) Well, it does not remove the problem completely,
but exponential growth of network traffic is changed to linear
(branches, that exceed pmtu are pruned) and tunnel mtu
rapidly degrades to value <68, where looping stops.
Yes, it is not good if there exists a router in the loop,
which does not force DF, even when encapsulating packets have DF set.
But it is not our problem! Nobody could accuse us, we made
all that we could make. Even if it is your gated who injected
fatal route to network, even if it were you who configured
fatal static route: you are innocent. :-)
Alexey Kuznetsov.
*/
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static const struct header_ops ipgre_header_ops;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
u32 id, u32 index,
bool truncate, bool is_ipv4);
static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
static int ipgre_err(struct sk_buff *skb, u32 info,
const struct tnl_ptk_info *tpi)
{
/* All the routers (except for Linux) return only
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
Moreover, Cisco "wise men" put GRE key to the third word
in GRE header. It makes impossible maintaining even soft
state for keyed GRE tunnels with enabled checksum. Tell
them "thank you".
Well, I wonder, rfc1812 was written by Cisco employee,
what the hell these idiots break standards established
by themselves???
*/
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn;
const struct iphdr *iph;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
unsigned int data_len = 0;
struct ip_tunnel *t;
if (tpi->proto == htons(ETH_P_TEB))
itn = net_generic(net, gre_tap_net_id);
else if (tpi->proto == htons(ETH_P_ERSPAN) ||
tpi->proto == htons(ETH_P_ERSPAN2))
itn = net_generic(net, erspan_net_id);
else
itn = net_generic(net, ipgre_net_id);
iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
iph->daddr, iph->saddr, tpi->key);
if (!t)
return -ENOENT;
switch (type) {
default:
case ICMP_PARAMETERPROB:
return 0;
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
I believe they are just ether pollution. --ANK
*/
break;
}
break;
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
return 0;
data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
break;
case ICMP_REDIRECT:
break;
}
#if IS_ENABLED(CONFIG_IPV6)
if (tpi->proto == htons(ETH_P_IPV6) &&
!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
type, data_len))
return 0;
#endif
if (t->parms.iph.daddr == 0 ||
ipv4_is_multicast(t->parms.iph.daddr))
return 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
return 0;
if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
return 0;
}
static void gre_err(struct sk_buff *skb, u32 info)
{
/* All the routers (except for Linux) return only
* 8 bytes of packet payload. It means, that precise relaying of
* ICMP in the real Internet is absolutely infeasible.
*
* Moreover, Cisco "wise men" put GRE key to the third word
* in GRE header. It makes impossible maintaining even soft
* state for keyed
* GRE tunnels with enabled checksum. Tell them "thank you".
*
* Well, I wonder, rfc1812 was written by Cisco employee,
* what the hell these idiots break standards established
* by themselves???
*/
const struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct tnl_ptk_info tpi;
if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
iph->ihl * 4) < 0)
return;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
skb->dev->ifindex, IPPROTO_GRE);
return;
}
if (type == ICMP_REDIRECT) {
ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
IPPROTO_GRE);
return;
}
ipgre_err(skb, info, &tpi);
}
static bool is_erspan_type1(int gre_hdr_len)
{
/* Both ERSPAN type I (version 0) and type II (version 1) use
* protocol 0x88BE, but type I has only a 4-byte GRE header,
* while type II has an 8-byte one.
*/
return gre_hdr_len == 4;
}
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
int gre_hdr_len)
{
struct net *net = dev_net(skb->dev);
struct metadata_dst *tun_dst = NULL;
struct erspan_base_hdr *ershdr;
struct ip_tunnel_net *itn;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
struct erspan_md2 *md2;
int ver;
int len;
itn = net_generic(net, erspan_net_id);
iph = ip_hdr(skb);
if (is_erspan_type1(gre_hdr_len)) {
ver = 0;
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags | TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
} else {
ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
ver = ershdr->ver;
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags | TUNNEL_KEY,
iph->saddr, iph->daddr, tpi->key);
}
if (tunnel) {
if (is_erspan_type1(gre_hdr_len))
len = gre_hdr_len;
else
len = gre_hdr_len + erspan_hdr_len(ver);
if (unlikely(!pskb_may_pull(skb, len)))
return PACKET_REJECT;
if (__iptunnel_pull_header(skb,
len,
htons(ETH_P_TEB),
false, false) < 0)
goto drop;
if (tunnel->collect_md) {
struct erspan_metadata *pkt_md, *md;
struct ip_tunnel_info *info;
unsigned char *gh;
__be64 tun_id;
__be16 flags;
tpi->flags |= TUNNEL_KEY;
flags = tpi->flags;
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ip_tun_rx_dst(skb, flags,
tun_id, sizeof(*md));
if (!tun_dst)
return PACKET_REJECT;
/* skb can be uncloned in __iptunnel_pull_header, so
* old pkt_md is no longer valid and we need to reset
* it
*/
gh = skb_network_header(skb) +
skb_network_header_len(skb);
pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
sizeof(*ershdr));
md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
md->version = ver;
md2 = &md->u.md2;
memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
ERSPAN_V2_MDSIZE);
info = &tun_dst->u.tun_info;
info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
info->options_len = sizeof(*md);
}
skb_reset_mac_header(skb);
ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
return PACKET_RCVD;
}
return PACKET_REJECT;
drop:
kfree_skb(skb);
return PACKET_RCVD;
}
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
struct metadata_dst *tun_dst = NULL;
const struct iphdr *iph;
struct ip_tunnel *tunnel;
iph = ip_hdr(skb);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
iph->saddr, iph->daddr, tpi->key);
if (tunnel) {
const struct iphdr *tnl_params;
if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
raw_proto, false) < 0)
goto drop;
/* Special case for ipgre_header_parse(), which expects the
* mac_header to point to the outer IP header.
*/
if (tunnel->dev->header_ops == &ipgre_header_ops)
skb_pop_mac_header(skb);
else
skb_reset_mac_header(skb);
tnl_params = &tunnel->parms.iph;
if (tunnel->collect_md || tnl_params->daddr == 0) {
__be16 flags;
__be64 tun_id;
flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
if (!tun_dst)
return PACKET_REJECT;
}
ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
return PACKET_RCVD;
}
return PACKET_NEXT;
drop:
kfree_skb(skb);
return PACKET_RCVD;
}
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
int hdr_len)
{
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn;
int res;
if (tpi->proto == htons(ETH_P_TEB))
itn = net_generic(net, gre_tap_net_id);
else
itn = net_generic(net, ipgre_net_id);
res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
/* ipgre tunnels in collect metadata mode should also
* receive ETH_P_TEB traffic.
*/
itn = net_generic(net, ipgre_net_id);
res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
}
return res;
}
static int gre_rcv(struct sk_buff *skb)
{
struct tnl_ptk_info tpi;
bool csum_err = false;
int hdr_len;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
/* Looped back packet, drop it! */
if (rt_is_output_route(skb_rtable(skb)))
goto drop;
}
#endif
hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
if (hdr_len < 0)
goto drop;
if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
tpi.proto == htons(ETH_P_ERSPAN2))) {
if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
return 0;
goto out;
}
if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
return 0;
out:
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
return 0;
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
__be16 flags = tunnel->parms.o_flags;
/* Push GRE header. */
gre_build_header(skb, tunnel->tun_hlen,
flags, proto, tunnel->parms.o_key,
(flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
int tunnel_hlen;
__be16 flags;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
ip_tunnel_info_af(tun_info) != AF_INET))
goto err_free_skb;
key = &tun_info->key;
tunnel_hlen = gre_calc_hlen(key->tun_flags);
if (skb_cow_head(skb, dev->needed_headroom))
goto err_free_skb;
/* Push Tunnel header. */
if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
goto err_free_skb;
flags = tun_info->key.tun_flags &
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
gre_build_header(skb, tunnel_hlen, flags, proto,
tunnel_id_to_key32(tun_info->key.tun_id),
(flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
return;
err_free_skb:
kfree_skb(skb);
DEV_STATS_INC(dev, tx_dropped);
}
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
struct erspan_metadata *md;
bool truncate = false;
__be16 proto;
int tunnel_hlen;
int version;
int nhoff;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
ip_tunnel_info_af(tun_info) != AF_INET))
goto err_free_skb;
key = &tun_info->key;
if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
goto err_free_skb;
if (tun_info->options_len < sizeof(*md))
goto err_free_skb;
md = ip_tunnel_info_opts(tun_info);
/* ERSPAN has a fixed 8-byte GRE header */
version = md->version;
tunnel_hlen = 8 + erspan_hdr_len(version);
if (skb_cow_head(skb, dev->needed_headroom))
goto err_free_skb;
if (gre_handle_offloads(skb, false))
goto err_free_skb;
if (skb->len > dev->mtu + dev->hard_header_len) {
if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
goto err_free_skb;
truncate = true;
}
nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP) &&
(ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
truncate = true;
if (skb->protocol == htons(ETH_P_IPV6)) {
int thoff;
if (skb_transport_header_was_set(skb))
thoff = skb_transport_offset(skb);
else
thoff = nhoff + sizeof(struct ipv6hdr);
if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
truncate = true;
}
if (version == 1) {
erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
ntohl(md->u.index), truncate, true);
proto = htons(ETH_P_ERSPAN);
} else if (version == 2) {
erspan_build_header_v2(skb,
ntohl(tunnel_id_to_key32(key->tun_id)),
md->u.md2.dir,
get_hwid(&md->u.md2),
truncate, true);
proto = htons(ETH_P_ERSPAN2);
} else {
goto err_free_skb;
}
gre_build_header(skb, 8, TUNNEL_SEQ,
proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
return;
err_free_skb:
kfree_skb(skb);
DEV_STATS_INC(dev, tx_dropped);
}
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
const struct ip_tunnel_key *key;
struct rtable *rt;
struct flowi4 fl4;
if (ip_tunnel_info_af(info) != AF_INET)
return -EINVAL;
key = &info->key;
ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id),
key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
skb->mark, skb_get_hash(skb), key->flow_flags);
rt = ip_route_output_key(dev_net(dev), &fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
info->key.u.ipv4.src = fl4.saddr;
return 0;
}
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
if (!pskb_inet_may_pull(skb))
goto free_skb;
if (tunnel->collect_md) {
gre_fb_xmit(skb, dev, skb->protocol);
return NETDEV_TX_OK;
}
if (dev->header_ops) {
if (skb_cow_head(skb, 0))
goto free_skb;
tnl_params = (const struct iphdr *)skb->data;
/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
* to gre header.
*/
skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
skb_reset_mac_header(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_start(skb) < skb->data)
goto free_skb;
} else {
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
tnl_params = &tunnel->parms.iph;
}
if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
goto free_skb;
__gre_xmit(skb, dev, tnl_params, skb->protocol);
return NETDEV_TX_OK;
free_skb:
kfree_skb(skb);
DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
bool truncate = false;
__be16 proto;
if (!pskb_inet_may_pull(skb))
goto free_skb;
if (tunnel->collect_md) {
erspan_fb_xmit(skb, dev);
return NETDEV_TX_OK;
}
if (gre_handle_offloads(skb, false))
goto free_skb;
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
if (skb->len > dev->mtu + dev->hard_header_len) {
if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
goto free_skb;
truncate = true;
}
/* Push ERSPAN header */
if (tunnel->erspan_ver == 0) {
proto = htons(ETH_P_ERSPAN);
tunnel->parms.o_flags &= ~TUNNEL_SEQ;
} else if (tunnel->erspan_ver == 1) {
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
tunnel->index,
truncate, true);
proto = htons(ETH_P_ERSPAN);
} else if (tunnel->erspan_ver == 2) {
erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
tunnel->dir, tunnel->hwid,
truncate, true);
proto = htons(ETH_P_ERSPAN2);
} else {
goto free_skb;
}
tunnel->parms.o_flags &= ~TUNNEL_KEY;
__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
return NETDEV_TX_OK;
free_skb:
kfree_skb(skb);
DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
if (!pskb_inet_may_pull(skb))
goto free_skb;
if (tunnel->collect_md) {
gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
return NETDEV_TX_OK;
}
if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
goto free_skb;
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
return NETDEV_TX_OK;
free_skb:
kfree_skb(skb);
DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
__be16 flags;
int len;
len = tunnel->tun_hlen;
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
len = tunnel->tun_hlen - len;
tunnel->hlen = tunnel->hlen + len;
if (dev->header_ops)
dev->hard_header_len += len;
else
dev->needed_headroom += len;
if (set_mtu)
dev->mtu = max_t(int, dev->mtu - len, 68);
flags = tunnel->parms.o_flags;
if (flags & TUNNEL_SEQ ||
(flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
dev->features &= ~NETIF_F_GSO_SOFTWARE;
dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
} else {
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}
}
static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
int cmd)
{
int err;
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
}
p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
p->o_flags = gre_flags_to_tnl_flags(p->o_flags);
err = ip_tunnel_ctl(dev, p, cmd);
if (err)
return err;
if (cmd == SIOCCHGTUNNEL) {
struct ip_tunnel *t = netdev_priv(dev);
t->parms.i_flags = p->i_flags;
t->parms.o_flags = p->o_flags;
if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
ipgre_link_update(dev, true);
}
p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
It allows one to construct a virtual multiprotocol broadcast "LAN"
over the Internet, provided multicast routing is tuned.
I have no idea whether this bicycle was invented before me,
so I had to set ARPHRD_IPGRE to a random value.
I have the impression that Cisco could make something similar,
but this feature is apparently missing in IOS<=11.2(8).
I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
ping -t 255 224.66.66.66
If nobody answers, mbone does not work.
ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
ip addr add 10.66.66.<somewhat>/24 dev Universe
ifconfig Universe up
ifconfig Universe add fe80::<Your_real_addr>/10
ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
ftp 10.66.66.66
...
ftp fec0:6666:6666::193.233.7.65
...
*/
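/* For illustration only (not part of the original recipe above): a modern
 * iproute2 equivalent of the ifconfig lines would be roughly:
 *
 *	ip link set Universe up
 *	ip addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */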
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
struct ip_tunnel *t = netdev_priv(dev);
struct iphdr *iph;
struct gre_base_hdr *greh;
iph = skb_push(skb, t->hlen + sizeof(*iph));
greh = (struct gre_base_hdr *)(iph+1);
greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
greh->protocol = htons(type);
memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
/* Set the source hardware address. */
if (saddr)
memcpy(&iph->saddr, saddr, 4);
if (daddr)
memcpy(&iph->daddr, daddr, 4);
if (iph->daddr)
return t->hlen + sizeof(*iph);
return -(t->hlen + sizeof(*iph));
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
memcpy(haddr, &iph->saddr, 4);
return 4;
}
static const struct header_ops ipgre_header_ops = {
.create = ipgre_header,
.parse = ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
if (ipv4_is_multicast(t->parms.iph.daddr)) {
struct flowi4 fl4;
struct rtable *rt;
rt = ip_route_output_gre(t->net, &fl4,
t->parms.iph.daddr,
t->parms.iph.saddr,
t->parms.o_key,
RT_TOS(t->parms.iph.tos),
t->parms.link);
if (IS_ERR(rt))
return -EADDRNOTAVAIL;
dev = rt->dst.dev;
ip_rt_put(rt);
if (!__in_dev_get_rtnl(dev))
return -EADDRNOTAVAIL;
t->mlink = dev->ifindex;
ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
}
return 0;
}
static int ipgre_close(struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
struct in_device *in_dev;
in_dev = inetdev_by_index(t->net, t->mlink);
if (in_dev)
ip_mc_dec_group(in_dev, t->parms.iph.daddr);
}
return 0;
}
#endif
static const struct net_device_ops ipgre_netdev_ops = {
.ndo_init = ipgre_tunnel_init,
.ndo_uninit = ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
.ndo_open = ipgre_open,
.ndo_stop = ipgre_close,
#endif
.ndo_start_xmit = ipgre_xmit,
.ndo_siocdevprivate = ip_tunnel_siocdevprivate,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = ipgre_tunnel_ctl,
};
#define GRE_FEATURES (NETIF_F_SG | \
NETIF_F_FRAGLIST | \
NETIF_F_HIGHDMA | \
NETIF_F_HW_CSUM)
static void ipgre_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipgre_netdev_ops;
dev->type = ARPHRD_IPGRE;
ip_tunnel_setup(dev, ipgre_net_id);
}
static void __gre_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel;
__be16 flags;
tunnel = netdev_priv(dev);
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
tunnel->parms.iph.protocol = IPPROTO_GRE;
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
dev->features |= GRE_FEATURES | NETIF_F_LLTX;
dev->hw_features |= GRE_FEATURES;
flags = tunnel->parms.o_flags;
/* TCP offload with GRE SEQ is not supported, nor can we support 2
* levels of outer headers requiring an update.
*/
if (flags & TUNNEL_SEQ)
return;
if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
return;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}
static int ipgre_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
__gre_tunnel_init(dev);
__dev_addr_set(dev, &iph->saddr, 4);
memcpy(dev->broadcast, &iph->daddr, 4);
dev->flags = IFF_NOARP;
netif_keep_dst(dev);
dev->addr_len = 4;
if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
if (!iph->saddr)
return -EINVAL;
dev->flags = IFF_BROADCAST;
dev->header_ops = &ipgre_header_ops;
dev->hard_header_len = tunnel->hlen + sizeof(*iph);
dev->needed_headroom = 0;
}
#endif
} else if (!tunnel->collect_md) {
dev->header_ops = &ipgre_header_ops;
dev->hard_header_len = tunnel->hlen + sizeof(*iph);
dev->needed_headroom = 0;
}
return ip_tunnel_init(dev);
}
static const struct gre_protocol ipgre_protocol = {
.handler = gre_rcv,
.err_handler = gre_err,
};
static int __net_init ipgre_init_net(struct net *net)
{
return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}
static struct pernet_operations ipgre_net_ops = {
.init = ipgre_init_net,
.exit_batch = ipgre_exit_batch_net,
.id = &ipgre_net_id,
.size = sizeof(struct ip_tunnel_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
__be16 flags;
if (!data)
return 0;
flags = 0;
if (data[IFLA_GRE_IFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
if (data[IFLA_GRE_OFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
if (flags & (GRE_VERSION|GRE_ROUTING))
return -EINVAL;
if (data[IFLA_GRE_COLLECT_METADATA] &&
data[IFLA_GRE_ENCAP_TYPE] &&
nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
return -EINVAL;
return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
__be32 daddr;
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
if (!data)
goto out;
if (data[IFLA_GRE_REMOTE]) {
memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
if (!daddr)
return -EINVAL;
}
out:
return ipgre_tunnel_validate(tb, data, extack);
}
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
__be16 flags = 0;
int ret;
if (!data)
return 0;
ret = ipgre_tap_validate(tb, data, extack);
if (ret)
return ret;
if (data[IFLA_GRE_ERSPAN_VER] &&
nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
return 0;
/* ERSPAN type II/III should only have GRE sequence and key flag */
if (data[IFLA_GRE_OFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
if (data[IFLA_GRE_IFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
if (!data[IFLA_GRE_COLLECT_METADATA] &&
flags != (GRE_SEQ | GRE_KEY))
return -EINVAL;
/* The ERSPAN Session ID is only 10 bits. Since we reuse the
* 32-bit key field as the ID, check its range.
*/
if (data[IFLA_GRE_IKEY] &&
(ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
return -EINVAL;
if (data[IFLA_GRE_OKEY] &&
(ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
return -EINVAL;
return 0;
}
static int ipgre_netlink_parms(struct net_device *dev,
struct nlattr *data[],
struct nlattr *tb[],
struct ip_tunnel_parm *parms,
__u32 *fwmark)
{
struct ip_tunnel *t = netdev_priv(dev);
memset(parms, 0, sizeof(*parms));
parms->iph.protocol = IPPROTO_GRE;
if (!data)
return 0;
if (data[IFLA_GRE_LINK])
parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
if (data[IFLA_GRE_IFLAGS])
parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
if (data[IFLA_GRE_OFLAGS])
parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
if (data[IFLA_GRE_IKEY])
parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
if (data[IFLA_GRE_OKEY])
parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
if (data[IFLA_GRE_LOCAL])
parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
if (data[IFLA_GRE_REMOTE])
parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
if (data[IFLA_GRE_TTL])
parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
if (data[IFLA_GRE_TOS])
parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
if (t->ignore_df)
return -EINVAL;
parms->iph.frag_off = htons(IP_DF);
}
if (data[IFLA_GRE_COLLECT_METADATA]) {
t->collect_md = true;
if (dev->type == ARPHRD_IPGRE)
dev->type = ARPHRD_NONE;
}
if (data[IFLA_GRE_IGNORE_DF]) {
if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
&& (parms->iph.frag_off & htons(IP_DF)))
return -EINVAL;
t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
}
if (data[IFLA_GRE_FWMARK])
*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
return 0;
}
static int erspan_netlink_parms(struct net_device *dev,
struct nlattr *data[],
struct nlattr *tb[],
struct ip_tunnel_parm *parms,
__u32 *fwmark)
{
struct ip_tunnel *t = netdev_priv(dev);
int err;
err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
if (err)
return err;
if (!data)
return 0;
if (data[IFLA_GRE_ERSPAN_VER]) {
t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
if (t->erspan_ver > 2)
return -EINVAL;
}
if (t->erspan_ver == 1) {
if (data[IFLA_GRE_ERSPAN_INDEX]) {
t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
if (t->index & ~INDEX_MASK)
return -EINVAL;
}
} else if (t->erspan_ver == 2) {
if (data[IFLA_GRE_ERSPAN_DIR]) {
t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
return -EINVAL;
}
if (data[IFLA_GRE_ERSPAN_HWID]) {
t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
return -EINVAL;
}
}
return 0;
}
/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
struct ip_tunnel_encap *ipencap)
{
bool ret = false;
memset(ipencap, 0, sizeof(*ipencap));
if (!data)
return ret;
if (data[IFLA_GRE_ENCAP_TYPE]) {
ret = true;
ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
}
if (data[IFLA_GRE_ENCAP_FLAGS]) {
ret = true;
ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
}
if (data[IFLA_GRE_ENCAP_SPORT]) {
ret = true;
ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
}
if (data[IFLA_GRE_ENCAP_DPORT]) {
ret = true;
ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
}
return ret;
}
static int gre_tap_init(struct net_device *dev)
{
__gre_tunnel_init(dev);
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_keep_dst(dev);
return ip_tunnel_init(dev);
}
static const struct net_device_ops gre_tap_netdev_ops = {
.ndo_init = gre_tap_init,
.ndo_uninit = ip_tunnel_uninit,
.ndo_start_xmit = gre_tap_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_fill_metadata_dst = gre_fill_metadata_dst,
};
static int erspan_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
if (tunnel->erspan_ver == 0)
tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
else
tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
tunnel->parms.iph.protocol = IPPROTO_GRE;
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
erspan_hdr_len(tunnel->erspan_ver);
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_keep_dst(dev);
return ip_tunnel_init(dev);
}
static const struct net_device_ops erspan_netdev_ops = {
.ndo_init = erspan_tunnel_init,
.ndo_uninit = ip_tunnel_uninit,
.ndo_start_xmit = erspan_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_fill_metadata_dst = gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
{
ether_setup(dev);
dev->max_mtu = 0;
dev->netdev_ops = &gre_tap_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip_tunnel_setup(dev, gre_tap_net_id);
}
static int
ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
{
struct ip_tunnel_encap ipencap;
if (ipgre_netlink_encap_parms(data, &ipencap)) {
struct ip_tunnel *t = netdev_priv(dev);
int err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
return err;
}
return 0;
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel_parm p;
__u32 fwmark = 0;
int err;
err = ipgre_newlink_encap_setup(dev, data);
if (err)
return err;
err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
if (err < 0)
return err;
return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
static int erspan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel_parm p;
__u32 fwmark = 0;
int err;
err = ipgre_newlink_encap_setup(dev, data);
if (err)
return err;
err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
if (err)
return err;
return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
__u32 fwmark = t->fwmark;
struct ip_tunnel_parm p;
int err;
err = ipgre_newlink_encap_setup(dev, data);
if (err)
return err;
err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
if (err < 0)
return err;
err = ip_tunnel_changelink(dev, tb, &p, fwmark);
if (err < 0)
return err;
t->parms.i_flags = p.i_flags;
t->parms.o_flags = p.o_flags;
ipgre_link_update(dev, !tb[IFLA_MTU]);
return 0;
}
static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
__u32 fwmark = t->fwmark;
struct ip_tunnel_parm p;
int err;
err = ipgre_newlink_encap_setup(dev, data);
if (err)
return err;
err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
if (err < 0)
return err;
err = ip_tunnel_changelink(dev, tb, &p, fwmark);
if (err < 0)
return err;
t->parms.i_flags = p.i_flags;
t->parms.o_flags = p.o_flags;
return 0;
}
static size_t ipgre_get_size(const struct net_device *dev)
{
return
/* IFLA_GRE_LINK */
nla_total_size(4) +
/* IFLA_GRE_IFLAGS */
nla_total_size(2) +
/* IFLA_GRE_OFLAGS */
nla_total_size(2) +
/* IFLA_GRE_IKEY */
nla_total_size(4) +
/* IFLA_GRE_OKEY */
nla_total_size(4) +
/* IFLA_GRE_LOCAL */
nla_total_size(4) +
/* IFLA_GRE_REMOTE */
nla_total_size(4) +
/* IFLA_GRE_TTL */
nla_total_size(1) +
/* IFLA_GRE_TOS */
nla_total_size(1) +
/* IFLA_GRE_PMTUDISC */
nla_total_size(1) +
/* IFLA_GRE_ENCAP_TYPE */
nla_total_size(2) +
/* IFLA_GRE_ENCAP_FLAGS */
nla_total_size(2) +
/* IFLA_GRE_ENCAP_SPORT */
nla_total_size(2) +
/* IFLA_GRE_ENCAP_DPORT */
nla_total_size(2) +
/* IFLA_GRE_COLLECT_METADATA */
nla_total_size(0) +
/* IFLA_GRE_IGNORE_DF */
nla_total_size(1) +
/* IFLA_GRE_FWMARK */
nla_total_size(4) +
/* IFLA_GRE_ERSPAN_INDEX */
nla_total_size(4) +
/* IFLA_GRE_ERSPAN_VER */
nla_total_size(1) +
/* IFLA_GRE_ERSPAN_DIR */
nla_total_size(1) +
/* IFLA_GRE_ERSPAN_HWID */
nla_total_size(2) +
0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm *p = &t->parms;
__be16 o_flags = p->o_flags;
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
gre_tnl_flags_to_gre_flags(p->i_flags)) ||
nla_put_be16(skb, IFLA_GRE_OFLAGS,
gre_tnl_flags_to_gre_flags(o_flags)) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
nla_put_u8(skb, IFLA_GRE_PMTUDISC,
!!(p->iph.frag_off & htons(IP_DF))) ||
nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
t->encap.type) ||
nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
t->encap.sport) ||
nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
t->encap.dport) ||
nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
t->encap.flags))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
goto nla_put_failure;
if (t->collect_md) {
if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
if (t->erspan_ver <= 2) {
if (t->erspan_ver != 0 && !t->collect_md)
t->parms.o_flags |= TUNNEL_KEY;
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
goto nla_put_failure;
if (t->erspan_ver == 1) {
if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
goto nla_put_failure;
} else if (t->erspan_ver == 2) {
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
goto nla_put_failure;
}
}
return ipgre_fill_info(skb, dev);
nla_put_failure:
return -EMSGSIZE;
}
static void erspan_setup(struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
ether_setup(dev);
dev->max_mtu = 0;
dev->netdev_ops = &erspan_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip_tunnel_setup(dev, erspan_net_id);
t->erspan_ver = 1;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_LINK] = { .type = NLA_U32 },
[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
[IFLA_GRE_IKEY] = { .type = NLA_U32 },
[IFLA_GRE_OKEY] = { .type = NLA_U32 },
[IFLA_GRE_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) },
[IFLA_GRE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_GRE_TTL] = { .type = NLA_U8 },
[IFLA_GRE_TOS] = { .type = NLA_U8 },
[IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
[IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
[IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 },
[IFLA_GRE_FWMARK] = { .type = NLA_U32 },
[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
[IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
[IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
[IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
.kind = "gre",
.maxtype = IFLA_GRE_MAX,
.policy = ipgre_policy,
.priv_size = sizeof(struct ip_tunnel),
.setup = ipgre_tunnel_setup,
.validate = ipgre_tunnel_validate,
.newlink = ipgre_newlink,
.changelink = ipgre_changelink,
.dellink = ip_tunnel_dellink,
.get_size = ipgre_get_size,
.fill_info = ipgre_fill_info,
.get_link_net = ip_tunnel_get_link_net,
};
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
.kind = "gretap",
.maxtype = IFLA_GRE_MAX,
.policy = ipgre_policy,
.priv_size = sizeof(struct ip_tunnel),
.setup = ipgre_tap_setup,
.validate = ipgre_tap_validate,
.newlink = ipgre_newlink,
.changelink = ipgre_changelink,
.dellink = ip_tunnel_dellink,
.get_size = ipgre_get_size,
.fill_info = ipgre_fill_info,
.get_link_net = ip_tunnel_get_link_net,
};
static struct rtnl_link_ops erspan_link_ops __read_mostly = {
.kind = "erspan",
.maxtype = IFLA_GRE_MAX,
.policy = ipgre_policy,
.priv_size = sizeof(struct ip_tunnel),
.setup = erspan_setup,
.validate = erspan_validate,
.newlink = erspan_newlink,
.changelink = erspan_changelink,
.dellink = ip_tunnel_dellink,
.get_size = ipgre_get_size,
.fill_info = erspan_fill_info,
.get_link_net = ip_tunnel_get_link_net,
};
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
u8 name_assign_type)
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
LIST_HEAD(list_kill);
struct ip_tunnel *t;
int err;
memset(&tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
&ipgre_tap_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
/* Configure flow-based GRE device. */
t = netdev_priv(dev);
t->collect_md = true;
err = ipgre_newlink(net, dev, tb, NULL, NULL);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
}
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
*/
err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
if (err)
goto out;
err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0)
goto out;
return dev;
out:
ip_tunnel_dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
static int __net_init ipgre_tap_init_net(struct net *net)
{
return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}
static struct pernet_operations ipgre_tap_net_ops = {
.init = ipgre_tap_init_net,
.exit_batch = ipgre_tap_exit_batch_net,
.id = &gre_tap_net_id,
.size = sizeof(struct ip_tunnel_net),
};
static int __net_init erspan_init_net(struct net *net)
{
return ip_tunnel_init_net(net, erspan_net_id,
&erspan_link_ops, "erspan0");
}
static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}
static struct pernet_operations erspan_net_ops = {
.init = erspan_init_net,
.exit_batch = erspan_exit_batch_net,
.id = &erspan_net_id,
.size = sizeof(struct ip_tunnel_net),
};
static int __init ipgre_init(void)
{
int err;
pr_info("GRE over IPv4 tunneling driver\n");
err = register_pernet_device(&ipgre_net_ops);
if (err < 0)
return err;
err = register_pernet_device(&ipgre_tap_net_ops);
if (err < 0)
goto pnet_tap_failed;
err = register_pernet_device(&erspan_net_ops);
if (err < 0)
goto pnet_erspan_failed;
err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
if (err < 0) {
pr_info("%s: can't add protocol\n", __func__);
goto add_proto_failed;
}
err = rtnl_link_register(&ipgre_link_ops);
if (err < 0)
goto rtnl_link_failed;
err = rtnl_link_register(&ipgre_tap_ops);
if (err < 0)
goto tap_ops_failed;
err = rtnl_link_register(&erspan_link_ops);
if (err < 0)
goto erspan_link_failed;
return 0;
erspan_link_failed:
rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
unregister_pernet_device(&ipgre_net_ops);
return err;
}
static void __exit ipgre_fini(void)
{
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_unregister(&erspan_link_ops);
gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
unregister_pernet_device(&ipgre_tap_net_ops);
unregister_pernet_device(&ipgre_net_ops);
unregister_pernet_device(&erspan_net_ops);
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");
| linux-master | net/ipv4/ip_gre.c |
/* Linux multicast routing support
* Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
*/
#include <linux/rhashtable.h>
#include <linux/mroute_base.h>
/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
struct net_device *dev,
unsigned long rate_limit,
unsigned char threshold,
unsigned short flags,
unsigned short get_iflink_mask)
{
RCU_INIT_POINTER(v->dev, NULL);
v->bytes_in = 0;
v->bytes_out = 0;
v->pkt_in = 0;
v->pkt_out = 0;
v->rate_limit = rate_limit;
v->flags = flags;
v->threshold = threshold;
if (v->flags & get_iflink_mask)
v->link = dev_get_iflink(dev);
else
v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
struct mr_table *
mr_table_alloc(struct net *net, u32 id,
struct mr_table_ops *ops,
void (*expire_func)(struct timer_list *t),
void (*table_set)(struct mr_table *mrt,
struct net *net))
{
struct mr_table *mrt;
int err;
mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
if (!mrt)
return ERR_PTR(-ENOMEM);
mrt->id = id;
write_pnet(&mrt->net, net);
mrt->ops = *ops;
err = rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
if (err) {
kfree(mrt);
return ERR_PTR(err);
}
INIT_LIST_HEAD(&mrt->mfc_cache_list);
INIT_LIST_HEAD(&mrt->mfc_unres_queue);
timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);
mrt->mroute_reg_vif_num = -1;
table_set(mrt, net);
return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
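/* Usage sketch (hypothetical names, for illustration only): a caller
 * supplies its rhashtable parameters, an expiry handler for the
 * unresolved-cache timer and a callback that links the new table into
 * its per-netns state, e.g.:
 *
 *	mrt = mr_table_alloc(net, RT_TABLE_DEFAULT, &my_mr_table_ops,
 *			     my_expire_process, my_table_set);
 *	if (IS_ERR(mrt))
 *		return PTR_ERR(mrt);
 */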
void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
struct rhlist_head *tmp, *list;
struct mr_mfc *c;
list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
rhl_for_each_entry_rcu(c, tmp, list, mnode)
if (parent == -1 || parent == c->mfc_parent)
return c;
return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);
void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
struct rhlist_head *tmp, *list;
struct mr_mfc *c;
list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
*mrt->ops.rht_params);
rhl_for_each_entry_rcu(c, tmp, list, mnode)
if (c->mfc_un.res.ttls[vifi] < 255)
return c;
return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);
void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
struct rhlist_head *tmp, *list;
struct mr_mfc *c, *proxy;
list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
rhl_for_each_entry_rcu(c, tmp, list, mnode) {
if (c->mfc_un.res.ttls[vifi] < 255)
return c;
/* It's ok if the vifi is part of the static tree */
proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
return c;
}
return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);
#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
struct mr_table *mrt = iter->mrt;
for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
if (!VIF_EXISTS(mrt, iter->ct))
continue;
if (pos-- == 0)
return &mrt->vif_table[iter->ct];
}
return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);
void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct mr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
struct mr_table *mrt = iter->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return mr_vif_seq_idx(net, iter, 0);
while (++iter->ct < mrt->maxvif) {
if (!VIF_EXISTS(mrt, iter->ct))
continue;
return &mrt->vif_table[iter->ct];
}
return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);
void *mr_mfc_seq_idx(struct net *net,
struct mr_mfc_iter *it, loff_t pos)
{
struct mr_table *mrt = it->mrt;
struct mr_mfc *mfc;
rcu_read_lock();
it->cache = &mrt->mfc_cache_list;
list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
if (pos-- == 0)
return mfc;
rcu_read_unlock();
spin_lock_bh(it->lock);
it->cache = &mrt->mfc_unres_queue;
list_for_each_entry(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
spin_unlock_bh(it->lock);
it->cache = NULL;
return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);
void *mr_mfc_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
struct mr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
struct mr_table *mrt = it->mrt;
struct mr_mfc *c = v;
++*pos;
if (v == SEQ_START_TOKEN)
return mr_mfc_seq_idx(net, seq->private, 0);
if (c->list.next != it->cache)
return list_entry(c->list.next, struct mr_mfc, list);
if (it->cache == &mrt->mfc_unres_queue)
goto end_of_list;
/* exhausted cache_array, show unresolved */
rcu_read_unlock();
it->cache = &mrt->mfc_unres_queue;
spin_lock_bh(it->lock);
if (!list_empty(it->cache))
return list_first_entry(it->cache, struct mr_mfc, list);
end_of_list:
spin_unlock_bh(it->lock);
it->cache = NULL;
return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mr_mfc *c, struct rtmsg *rtm)
{
struct net_device *vif_dev;
struct rta_mfc_stats mfcs;
struct nlattr *mp_attr;
struct rtnexthop *nhp;
unsigned long lastuse;
int ct;
/* If cache is unresolved, don't try to parse IIF and OIF */
if (c->mfc_parent >= MAXVIFS) {
rtm->rtm_flags |= RTNH_F_UNRESOLVED;
return -ENOENT;
}
rcu_read_lock();
vif_dev = rcu_dereference(mrt->vif_table[c->mfc_parent].dev);
if (vif_dev && nla_put_u32(skb, RTA_IIF, vif_dev->ifindex) < 0) {
rcu_read_unlock();
return -EMSGSIZE;
}
rcu_read_unlock();
if (c->mfc_flags & MFC_OFFLOAD)
rtm->rtm_flags |= RTNH_F_OFFLOAD;
mp_attr = nla_nest_start_noflag(skb, RTA_MULTIPATH);
if (!mp_attr)
return -EMSGSIZE;
rcu_read_lock();
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
struct vif_device *vif = &mrt->vif_table[ct];
vif_dev = rcu_dereference(vif->dev);
if (vif_dev && c->mfc_un.res.ttls[ct] < 255) {
nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
if (!nhp) {
rcu_read_unlock();
nla_nest_cancel(skb, mp_attr);
return -EMSGSIZE;
}
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
nhp->rtnh_ifindex = vif_dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
rcu_read_unlock();
nla_nest_end(skb, mp_attr);
lastuse = READ_ONCE(c->mfc_un.res.lastuse);
lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
mfcs.mfcs_packets = c->mfc_un.res.pkt;
mfcs.mfcs_bytes = c->mfc_un.res.bytes;
mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
RTA_PAD))
return -EMSGSIZE;
rtm->rtm_type = RTN_MULTICAST;
return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
static bool mr_mfc_uses_dev(const struct mr_table *mrt,
const struct mr_mfc *c,
const struct net_device *dev)
{
int ct;
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
const struct net_device *vif_dev;
const struct vif_device *vif;
vif = &mrt->vif_table[ct];
vif_dev = rcu_access_pointer(vif->dev);
if (vif_dev && c->mfc_un.res.ttls[ct] < 255 &&
vif_dev == dev)
return true;
}
return false;
}
int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
struct netlink_callback *cb,
int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
spinlock_t *lock, struct fib_dump_filter *filter)
{
unsigned int e = 0, s_e = cb->args[1];
unsigned int flags = NLM_F_MULTI;
struct mr_mfc *mfc;
int err;
if (filter->filter_set)
flags |= NLM_F_DUMP_FILTERED;
list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
if (e < s_e)
goto next_entry;
if (filter->dev &&
!mr_mfc_uses_dev(mrt, mfc, filter->dev))
goto next_entry;
err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
if (err < 0)
goto out;
next_entry:
e++;
}
spin_lock_bh(lock);
list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
if (e < s_e)
goto next_entry2;
if (filter->dev &&
!mr_mfc_uses_dev(mrt, mfc, filter->dev))
goto next_entry2;
err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
if (err < 0) {
spin_unlock_bh(lock);
goto out;
}
next_entry2:
e++;
}
spin_unlock_bh(lock);
err = 0;
out:
cb->args[1] = e;
return err;
}
EXPORT_SYMBOL(mr_table_dump);
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct mr_table *(*iter)(struct net *net,
struct mr_table *mrt),
int (*fill)(struct mr_table *mrt,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
spinlock_t *lock, struct fib_dump_filter *filter)
{
unsigned int t = 0, s_t = cb->args[0];
struct net *net = sock_net(skb->sk);
struct mr_table *mrt;
int err;
/* multicast does not track protocol or have route type other
* than RTN_MULTICAST
*/
if (filter->filter_set) {
if (filter->protocol || filter->flags ||
(filter->rt_type && filter->rt_type != RTN_MULTICAST))
return skb->len;
}
rcu_read_lock();
for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
if (t < s_t)
goto next_table;
err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
if (err < 0)
break;
cb->args[1] = 0;
next_table:
t++;
}
rcu_read_unlock();
cb->args[0] = t;
return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
int (*rules_dump)(struct net *net,
struct notifier_block *nb,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
struct netlink_ext_ack *extack)
{
struct mr_table *mrt;
int err;
err = rules_dump(net, nb, extack);
if (err)
return err;
for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
struct vif_device *v = &mrt->vif_table[0];
struct net_device *vif_dev;
struct mr_mfc *mfc;
int vifi;
/* Notify on table VIF entries */
rcu_read_lock();
for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
vif_dev = rcu_dereference(v->dev);
if (!vif_dev)
continue;
err = mr_call_vif_notifier(nb, family,
FIB_EVENT_VIF_ADD, v,
vif_dev, vifi,
mrt->id, extack);
if (err)
break;
}
rcu_read_unlock();
if (err)
return err;
/* Notify on table MFC entries */
list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
err = mr_call_mfc_notifier(nb, family,
FIB_EVENT_ENTRY_ADD,
mfc, mrt->id, extack);
if (err)
return err;
}
}
return 0;
}
EXPORT_SYMBOL(mr_dump);
| linux-master | net/ipv4/ipmr_base.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <linux/in6.h>
#include <net/ip.h>
int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack)
{
*ip_proto = nla_get_u8(attr);
switch (*ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
return 0;
case IPPROTO_ICMP:
if (family != AF_INET)
break;
return 0;
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ICMPV6:
if (family != AF_INET6)
break;
return 0;
#endif
}
NL_SET_ERR_MSG(extack, "Unsupported ip proto");
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
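/* Usage sketch (hypothetical caller, for illustration only): an
 * RTM_GETROUTE doit() handler with an optional RTA_IP_PROTO attribute
 * might validate it like this:
 *
 *	u8 ip_proto = 0;
 *
 *	if (tb[RTA_IP_PROTO]) {
 *		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
 *						  &ip_proto, AF_INET,
 *						  extack);
 *		if (err)
 *			return err;
 *	}
 */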
| linux-master | net/ipv4/netlink.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <net/tcp.h>
/* The bandwidth estimator estimates the rate at which the network
* can currently deliver outbound data packets for this flow. At a high
* level, it operates by taking a delivery rate sample for each ACK.
*
* A rate sample records the rate at which the network delivered packets
* for this flow, calculated over the time interval between the transmission
* of a data packet and the acknowledgment of that packet.
*
* Specifically, over the interval between each transmit and corresponding ACK,
* the estimator generates a delivery rate sample. Typically it uses the rate
* at which packets were acknowledged. However, the approach of using only the
* acknowledgment rate faces a challenge under the prevalent ACK decimation or
* compression: packets can temporarily appear to be delivered much quicker
* than the bottleneck rate. Since it is physically impossible to do that in a
* sustained fashion, when the estimator notices that the ACK rate is faster
* than the transmit rate, it uses the latter:
*
* send_rate = #pkts_delivered/(last_snd_time - first_snd_time)
* ack_rate = #pkts_delivered/(last_ack_time - first_ack_time)
* bw = min(send_rate, ack_rate)
*
* Notice the estimator essentially estimates the goodput, not always the
* network bottleneck link rate when the sending or receiving is limited by
* other factors like applications or receiver window limits. The estimator
* deliberately avoids using the inter-packet spacing approach because that
* approach requires a large number of samples and sophisticated filtering.
*
* TCP flows can often be application-limited in request/response workloads.
* The estimator marks a bandwidth sample as application-limited if there
* was some moment during the sampled window of packets when there was no data
* ready to send in the write queue.
*/
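/* Worked example of the formulas above (illustrative numbers only):
 * if 10 packets are delivered over a window where
 * last_snd_time - first_snd_time = 10 ms and
 * last_ack_time - first_ack_time = 2 ms (e.g. due to ACK compression),
 * then send_rate = 10 pkts / 10 ms = 1 pkt/ms and
 * ack_rate = 10 pkts / 2 ms = 5 pkts/ms, so the estimator reports
 * bw = min(send_rate, ack_rate) = 1 pkt/ms instead of the transient
 * 5 pkts/ms that the ACK rate alone would suggest.
 */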
/* Snapshot the current delivery information in the skb, to generate
* a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
*/
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
/* In general we need to start delivery rate samples from the
* time we received the most recent ACK, to ensure we include
* the full time the network needs to deliver all in-flight
* packets. If there are no packets in flight yet, then we
* know that any ACKs after now indicate that the network was
* able to deliver those packets completely in the sampling
* interval between now and the next ACK.
*
* Note that we use packets_out instead of tcp_packets_in_flight(tp)
* because the latter is a guess based on RTO and loss-marking
* heuristics. We don't want spurious RTOs or loss markings to cause
* a spuriously small time interval, causing a spuriously high
* bandwidth estimate.
*/
if (!tp->packets_out) {
u64 tstamp_us = tcp_skb_timestamp_us(skb);
tp->first_tx_mstamp = tstamp_us;
tp->delivered_mstamp = tstamp_us;
}
TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
}
/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
* delivery information when the skb was last transmitted.
*
* If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
* called multiple times. We favor the information from the most recently
* sent skb, i.e., the skb with the most recently sent time and the highest
* sequence.
*/
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
struct rate_sample *rs)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
u64 tx_tstamp;
if (!scb->tx.delivered_mstamp)
return;
tx_tstamp = tcp_skb_timestamp_us(skb);
if (!rs->prior_delivered ||
tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
scb->end_seq, rs->last_end_seq)) {
rs->prior_delivered_ce = scb->tx.delivered_ce;
rs->prior_delivered = scb->tx.delivered;
rs->prior_mstamp = scb->tx.delivered_mstamp;
rs->is_app_limited = scb->tx.is_app_limited;
rs->is_retrans = scb->sacked & TCPCB_RETRANS;
rs->last_end_seq = scb->end_seq;
/* Record send time of most recently ACKed packet: */
tp->first_tx_mstamp = tx_tstamp;
/* Find the duration of the "send phase" of this window: */
rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
scb->tx.first_tx_mstamp);
}
/* Mark off the skb delivered once it's sacked to avoid being
* used again when it's cumulatively acked. For acked packets
* we don't need to reset since it'll be freed soon.
*/
if (scb->sacked & TCPCB_SACKED_ACKED)
scb->tx.delivered_mstamp = 0;
}
/* Update the connection delivery information and generate a rate sample. */
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
bool is_sack_reneg, struct rate_sample *rs)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 snd_us, ack_us;
/* Clear app limited if bubble is acked and gone. */
if (tp->app_limited && after(tp->delivered, tp->app_limited))
tp->app_limited = 0;
/* TODO: there are multiple places throughout tcp_ack() to get
* current time. Refactor the code using a new "tcp_acktag_state"
* to carry current time, flags, stats like "tcp_sacktag_state".
*/
if (delivered)
tp->delivered_mstamp = tp->tcp_mstamp;
rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
rs->losses = lost; /* freshly marked lost */
/* Return an invalid sample if no timing information is available or
* in recovery from loss with SACK reneging. Rate samples taken during
* a SACK reneging event may overestimate bw by including packets that
* were SACKed before the reneg.
*/
if (!rs->prior_mstamp || is_sack_reneg) {
rs->delivered = -1;
rs->interval_us = -1;
return;
}
rs->delivered = tp->delivered - rs->prior_delivered;
rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
/* delivered_ce occupies less than 32 bits in the skb control block */
rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;
/* Model sending data and receiving ACKs as separate pipeline phases
* for a window. Usually the ACK phase is longer, but with ACK
* compression the send phase can be longer. To be safe we use the
* longer phase.
*/
snd_us = rs->interval_us; /* send phase */
ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
rs->prior_mstamp); /* ack phase */
rs->interval_us = max(snd_us, ack_us);
/* Record both segment send and ack receive intervals */
rs->snd_interval_us = snd_us;
rs->rcv_interval_us = ack_us;
/* Normally we expect interval_us >= min-rtt.
* Note that rate may still be over-estimated when a spuriously
* retransmitted skb was first (s)acked because "interval_us"
* is under-estimated (up to an RTT). However, continuously
* measuring the delivery rate during loss recovery is crucial
* for connections that suffer heavy or prolonged losses.
*/
if (unlikely(rs->interval_us < tcp_min_rtt(tp))) {
if (!rs->is_retrans)
pr_debug("tcp rate: %ld %d %u %u %u\n",
rs->interval_us, rs->delivered,
inet_csk(sk)->icsk_ca_state,
tp->rx_opt.sack_ok, tcp_min_rtt(tp));
rs->interval_us = -1;
return;
}
/* Record the last non-app-limited or the highest app-limited bw */
if (!rs->is_app_limited ||
((u64)rs->delivered * tp->rate_interval_us >=
(u64)tp->rate_delivered * rs->interval_us)) {
tp->rate_delivered = rs->delivered;
tp->rate_interval_us = rs->interval_us;
tp->rate_app_limited = rs->is_app_limited;
}
}
/* If a gap is detected between sends, mark the socket application-limited. */
void tcp_rate_check_app_limited(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
if (/* We have less than one packet to send. */
tp->write_seq - tp->snd_nxt < tp->mss_cache &&
/* Nothing in sending host's qdisc queues or NIC tx queue. */
sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
/* We are not limited by CWND. */
tcp_packets_in_flight(tp) < tcp_snd_cwnd(tp) &&
/* All lost packets have been retransmitted. */
tp->lost_out <= tp->retrans_out)
tp->app_limited =
(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
}
EXPORT_SYMBOL_GPL(tcp_rate_check_app_limited);
| linux-master | net/ipv4/tcp_rate.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Pluggable TCP upper layer protocol support.
*
* Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
* Copyright (c) 2016-2017, Dave Watson <[email protected]>. All rights reserved.
*
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>
static DEFINE_SPINLOCK(tcp_ulp_list_lock);
static LIST_HEAD(tcp_ulp_list);
/* Simple linear search, don't expect many entries! */
static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
{
struct tcp_ulp_ops *e;
list_for_each_entry_rcu(e, &tcp_ulp_list, list,
lockdep_is_held(&tcp_ulp_list_lock)) {
if (strcmp(e->name, name) == 0)
return e;
}
return NULL;
}
static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
{
const struct tcp_ulp_ops *ulp = NULL;
rcu_read_lock();
ulp = tcp_ulp_find(name);
#ifdef CONFIG_MODULES
if (!ulp && capable(CAP_NET_ADMIN)) {
rcu_read_unlock();
request_module("tcp-ulp-%s", name);
rcu_read_lock();
ulp = tcp_ulp_find(name);
}
#endif
if (!ulp || !try_module_get(ulp->owner))
ulp = NULL;
rcu_read_unlock();
return ulp;
}
/* Attach new upper layer protocol to the list
* of available protocols.
*/
int tcp_register_ulp(struct tcp_ulp_ops *ulp)
{
int ret = 0;
spin_lock(&tcp_ulp_list_lock);
if (tcp_ulp_find(ulp->name))
ret = -EEXIST;
else
list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
spin_unlock(&tcp_ulp_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_ulp);
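/* Usage sketch (hypothetical module, for illustration only): a ULP
 * module fills in a tcp_ulp_ops and registers it from module_init(),
 * e.g.:
 *
 *	static int my_ulp_init(struct sock *sk)
 *	{
 *		return 0;	// set up per-socket ULP state here
 *	}
 *
 *	static struct tcp_ulp_ops my_ulp_ops __read_mostly = {
 *		.name	= "my_ulp",
 *		.owner	= THIS_MODULE,
 *		.init	= my_ulp_init,
 *	};
 *
 *	module_init():	tcp_register_ulp(&my_ulp_ops);
 *	module_exit():	tcp_unregister_ulp(&my_ulp_ops);
 *
 * Userspace then selects it with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "my_ulp", sizeof("my_ulp")).
 */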
void tcp_unregister_ulp(struct tcp_ulp_ops *ulp)
{
spin_lock(&tcp_ulp_list_lock);
list_del_rcu(&ulp->list);
spin_unlock(&tcp_ulp_list_lock);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_ulp);
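/*
 * Illustrative sketch, not part of tcp_ulp.c: the pattern a hypothetical
 * out-of-tree ULP module ("demo") would follow to plug into the registry
 * above; the in-tree tls ULP does the same from its module init path. All
 * names below are made up for illustration only.
 */
static int example_ulp_init(struct sock *sk)
{
	return 0;	/* set up per-socket ULP state here */
}

static struct tcp_ulp_ops example_ulp_ops __read_mostly = {
	.name	= "demo",
	.owner	= THIS_MODULE,
	.init	= example_ulp_init,
};

static int __init example_ulp_register(void)
{
	/* afterwards, setsockopt(fd, IPPROTO_TCP, TCP_ULP, "demo", 4) attaches it */
	return tcp_register_ulp(&example_ulp_ops);
}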
/* Build string with list of available upper layer protocol values */
void tcp_get_available_ulp(char *buf, size_t maxlen)
{
struct tcp_ulp_ops *ulp_ops;
size_t offs = 0;
*buf = '\0';
rcu_read_lock();
list_for_each_entry_rcu(ulp_ops, &tcp_ulp_list, list) {
offs += snprintf(buf + offs, maxlen - offs,
"%s%s",
offs == 0 ? "" : " ", ulp_ops->name);
if (WARN_ON_ONCE(offs >= maxlen))
break;
}
rcu_read_unlock();
}
void tcp_update_ulp(struct sock *sk, struct proto *proto,
void (*write_space)(struct sock *sk))
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ulp_ops->update)
icsk->icsk_ulp_ops->update(sk, proto, write_space);
}
void tcp_cleanup_ulp(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
/* No sock_owned_by_me() check here as at the time the
* stack calls this function, the socket is dead and
* about to be destroyed.
*/
if (!icsk->icsk_ulp_ops)
return;
if (icsk->icsk_ulp_ops->release)
icsk->icsk_ulp_ops->release(sk);
module_put(icsk->icsk_ulp_ops->owner);
icsk->icsk_ulp_ops = NULL;
}
static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int err;
err = -EEXIST;
if (icsk->icsk_ulp_ops)
goto out_err;
if (sk->sk_socket)
clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
err = -ENOTCONN;
if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN)
goto out_err;
err = ulp_ops->init(sk);
if (err)
goto out_err;
icsk->icsk_ulp_ops = ulp_ops;
return 0;
out_err:
module_put(ulp_ops->owner);
return err;
}
int tcp_set_ulp(struct sock *sk, const char *name)
{
const struct tcp_ulp_ops *ulp_ops;
sock_owned_by_me(sk);
ulp_ops = __tcp_ulp_find_autoload(name);
if (!ulp_ops)
return -ENOENT;
return __tcp_set_ulp(sk, ulp_ops);
}
| linux-master | net/ipv4/tcp_ulp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TCP Low Priority (TCP-LP)
*
* TCP Low Priority is a distributed algorithm whose goal is to utilize only
* the excess network bandwidth as compared to the ``fair share`` of
* bandwidth as targeted by TCP.
*
* As of 2.6.13, Linux supports pluggable congestion control algorithms.
 * Due to the limitations of the API, we make the following changes relative
 * to the original TCP-LP implementation:
 * o We use newReno for most of the core CA handling and only add some
 * checking within cong_avoid.
 * o Error correction for the remote HZ estimate, so the remote HZ keeps
 * being checked and updated.
 * o Calculation of One-Way-Delay (OWD) is handled within rtt_sample, since
 * OWD has a similar meaning to RTT. Also correct the buggy formula.
 * o Reaction to Early Congestion Indication (ECI) is handled within
 * pkts_acked, as mentioned in the pseudo code.
 * o OWD is handled in relative format, where the local time stamp is in
 * tcp_time_stamp format.
*
* Original Author:
* Aleksandar Kuzmanovic <[email protected]>
* Available from:
* http://www.ece.rice.edu/~akuzma/Doc/akuzma/TCP-LP.pdf
* Original implementation for 2.4.19:
* http://www-ece.rice.edu/networks/TCP-LP/
*
* 2.6.x module Authors:
* Wong Hoi Sing, Edison <[email protected]>
* Hung Hing Lun, Mike <[email protected]>
* SourceForge project page:
* http://tcp-lp-mod.sourceforge.net/
*/
#include <linux/module.h>
#include <net/tcp.h>
/* resolution of owd */
#define LP_RESOL TCP_TS_HZ
/**
* enum tcp_lp_state
* @LP_VALID_RHZ: is remote HZ valid?
* @LP_VALID_OWD: is OWD valid?
* @LP_WITHIN_THR: are we within threshold?
* @LP_WITHIN_INF: are we within inference?
*
* TCP-LP's state flags.
 * We create this set of state flags mainly for debugging.
*/
enum tcp_lp_state {
LP_VALID_RHZ = (1 << 0),
LP_VALID_OWD = (1 << 1),
LP_WITHIN_THR = (1 << 3),
LP_WITHIN_INF = (1 << 4),
};
/**
* struct lp
* @flag: TCP-LP state flag
* @sowd: smoothed OWD << 3
* @owd_min: min OWD
* @owd_max: max OWD
* @owd_max_rsv: reserved max owd
* @remote_hz: estimated remote HZ
* @remote_ref_time: remote reference time
* @local_ref_time: local reference time
* @last_drop: time for last active drop
* @inference: current inference
*
* TCP-LP's private struct.
 * We take the idea from the original TCP-LP implementation and keep only
 * the fields we found really useful.
*/
struct lp {
u32 flag;
u32 sowd;
u32 owd_min;
u32 owd_max;
u32 owd_max_rsv;
u32 remote_hz;
u32 remote_ref_time;
u32 local_ref_time;
u32 last_drop;
u32 inference;
};
/**
* tcp_lp_init
* @sk: socket to initialize congestion control algorithm for
*
* Init all required variables.
* Clone the handling from Vegas module implementation.
*/
static void tcp_lp_init(struct sock *sk)
{
struct lp *lp = inet_csk_ca(sk);
lp->flag = 0;
lp->sowd = 0;
lp->owd_min = 0xffffffff;
lp->owd_max = 0;
lp->owd_max_rsv = 0;
lp->remote_hz = 0;
lp->remote_ref_time = 0;
lp->local_ref_time = 0;
lp->last_drop = 0;
lp->inference = 0;
}
/**
* tcp_lp_cong_avoid
* @sk: socket to avoid congesting
*
* Implementation of cong_avoid.
* Will only call newReno CA when away from inference.
 * From TCP-LP's paper, this is handled as additive increase.
*/
static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct lp *lp = inet_csk_ca(sk);
if (!(lp->flag & LP_WITHIN_INF))
tcp_reno_cong_avoid(sk, ack, acked);
}
/**
* tcp_lp_remote_hz_estimator
* @sk: socket which needs an estimate for the remote HZs
*
* Estimate remote HZ.
 * We keep updating the estimated value, whereas the original TCP-LP
 * implementation only guesses it once and uses it forever.
*/
static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
s64 rhz = lp->remote_hz << 6; /* remote HZ << 6 */
s64 m = 0;
	/* reference time not recorded yet
	 * go away!! record it before coming back!! */
if (lp->remote_ref_time == 0 || lp->local_ref_time == 0)
goto out;
	/* we can't calc remote HZ with no difference!! */
if (tp->rx_opt.rcv_tsval == lp->remote_ref_time ||
tp->rx_opt.rcv_tsecr == lp->local_ref_time)
goto out;
m = TCP_TS_HZ *
(tp->rx_opt.rcv_tsval - lp->remote_ref_time) /
(tp->rx_opt.rcv_tsecr - lp->local_ref_time);
if (m < 0)
m = -m;
if (rhz > 0) {
m -= rhz >> 6; /* m is now error in remote HZ est */
rhz += m; /* 63/64 old + 1/64 new */
} else
rhz = m << 6;
out:
/* record time for successful remote HZ calc */
if ((rhz >> 6) > 0)
lp->flag |= LP_VALID_RHZ;
else
lp->flag &= ~LP_VALID_RHZ;
/* record reference time stamp */
lp->remote_ref_time = tp->rx_opt.rcv_tsval;
lp->local_ref_time = tp->rx_opt.rcv_tsecr;
return rhz >> 6;
}
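/*
 * Illustrative sketch, not part of tcp_lp.c: the raw estimate inside the
 * EWMA above, as a standalone helper with made-up names. If the peer's
 * tsval advanced by 250 ticks while our tsecr advanced by 1000 ticks of the
 * TCP_TS_HZ = 1000 clock, the raw estimate is 1000 * 250 / 1000 = 250 Hz;
 * the function above then blends it in as 63/64 old + 1/64 new.
 */
static u32 example_remote_hz_raw(u32 tsval_delta, u32 tsecr_delta)
{
	if (!tsecr_delta)	/* no local time difference, no estimate */
		return 0;
	return div_u64((u64)TCP_TS_HZ * tsval_delta, tsecr_delta);
}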
/**
* tcp_lp_owd_calculator
* @sk: socket to calculate one way delay for
*
* Calculate one way delay (in relative format).
 * The original implementation computes OWD directly as the remote time
 * difference minus the local time difference. Since that difference simply
 * equals the RTT, when the network is stable the remote RTT equals the
 * local RTT and the resulting OWD is zero.
 * It seems to be a bug and so we fixed it.
*/
static u32 tcp_lp_owd_calculator(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
s64 owd = 0;
lp->remote_hz = tcp_lp_remote_hz_estimator(sk);
if (lp->flag & LP_VALID_RHZ) {
owd =
tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
tp->rx_opt.rcv_tsecr * (LP_RESOL / TCP_TS_HZ);
if (owd < 0)
owd = -owd;
}
if (owd > 0)
lp->flag |= LP_VALID_OWD;
else
lp->flag &= ~LP_VALID_OWD;
return owd;
}
/**
* tcp_lp_rtt_sample
* @sk: socket to add a rtt sample to
* @rtt: round trip time, which is ignored!
*
 * Implementation of rtt_sample.
* Will take the following action,
* 1. calc OWD,
* 2. record the min/max OWD,
* 3. calc smoothed OWD (SOWD).
* Most ideas come from the original TCP-LP implementation.
*/
static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
{
struct lp *lp = inet_csk_ca(sk);
s64 mowd = tcp_lp_owd_calculator(sk);
/* sorry that we don't have valid data */
if (!(lp->flag & LP_VALID_RHZ) || !(lp->flag & LP_VALID_OWD))
return;
/* record the next min owd */
if (mowd < lp->owd_min)
lp->owd_min = mowd;
	/* always forget the largest of the maxima;
	 * we just set owd_max to the one below it */
if (mowd > lp->owd_max) {
if (mowd > lp->owd_max_rsv) {
if (lp->owd_max_rsv == 0)
lp->owd_max = mowd;
else
lp->owd_max = lp->owd_max_rsv;
lp->owd_max_rsv = mowd;
} else
lp->owd_max = mowd;
}
/* calc for smoothed owd */
if (lp->sowd != 0) {
mowd -= lp->sowd >> 3; /* m is now error in owd est */
lp->sowd += mowd; /* owd = 7/8 owd + 1/8 new */
} else
lp->sowd = mowd << 3; /* take the measured time be owd */
}
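/*
 * Illustrative sketch, not part of tcp_lp.c: the smoothed-OWD update above
 * as a standalone helper with a made-up name. sowd is kept shifted left by
 * 3 bits; adding the unscaled sample and subtracting sowd >> 3 gives the
 * usual 7/8 old + 1/8 new filter once the scaling is removed.
 */
static u32 example_sowd_update(u32 sowd_shift3, u32 owd_sample)
{
	if (!sowd_shift3)
		return owd_sample << 3;	/* first sample seeds the filter */
	return sowd_shift3 + owd_sample - (sowd_shift3 >> 3);
}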
/**
* tcp_lp_pkts_acked
* @sk: socket requiring congestion avoidance calculations
*
* Implementation of pkts_acked.
* Deal with active drop under Early Congestion Indication.
 * Only dropping to half and dropping to 1 are handled, because we want to
 * fall back to newReno for the increase case.
 * We work it out by following the idea from TCP-LP's paper directly.
*/
static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
u32 now = tcp_time_stamp(tp);
u32 delta;
if (sample->rtt_us > 0)
tcp_lp_rtt_sample(sk, sample->rtt_us);
/* calc inference */
delta = now - tp->rx_opt.rcv_tsecr;
if ((s32)delta > 0)
lp->inference = 3 * delta;
/* test if within inference */
if (lp->last_drop && (now - lp->last_drop < lp->inference))
lp->flag |= LP_WITHIN_INF;
else
lp->flag &= ~LP_WITHIN_INF;
/* test if within threshold */
if (lp->sowd >> 3 <
lp->owd_min + 15 * (lp->owd_max - lp->owd_min) / 100)
lp->flag |= LP_WITHIN_THR;
else
lp->flag &= ~LP_WITHIN_THR;
pr_debug("TCP-LP: %05o|%5u|%5u|%15u|%15u|%15u\n", lp->flag,
tcp_snd_cwnd(tp), lp->remote_hz, lp->owd_min, lp->owd_max,
lp->sowd >> 3);
if (lp->flag & LP_WITHIN_THR)
return;
	/* FIXME: try to reset owd_min and owd_max here
	 * to decrease the chance that the min/max is no longer suitable
	 * and we usually stay within the threshold while within inference */
lp->owd_min = lp->sowd >> 3;
lp->owd_max = lp->sowd >> 2;
lp->owd_max_rsv = lp->sowd >> 2;
/* happened within inference
	 * drop snd_cwnd to 1 */
if (lp->flag & LP_WITHIN_INF)
tcp_snd_cwnd_set(tp, 1U);
/* happened after inference
	 * cut snd_cwnd in half */
else
tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp) >> 1U, 1U));
/* record this drop time */
lp->last_drop = now;
}
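/*
 * Illustrative sketch, not part of tcp_lp.c: the 15% threshold test used
 * above, as a standalone predicate with made-up names. With owd_min = 100
 * and owd_max = 300 the threshold is 100 + 15 * (300 - 100) / 100 = 130,
 * so a smoothed OWD below 130 keeps TCP-LP out of its back-off path.
 */
static bool example_within_threshold(u32 sowd_shift3, u32 owd_min, u32 owd_max)
{
	return (sowd_shift3 >> 3) < owd_min + 15 * (owd_max - owd_min) / 100;
}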
static struct tcp_congestion_ops tcp_lp __read_mostly = {
.init = tcp_lp_init,
.ssthresh = tcp_reno_ssthresh,
.undo_cwnd = tcp_reno_undo_cwnd,
.cong_avoid = tcp_lp_cong_avoid,
.pkts_acked = tcp_lp_pkts_acked,
.owner = THIS_MODULE,
.name = "lp"
};
static int __init tcp_lp_register(void)
{
BUILD_BUG_ON(sizeof(struct lp) > ICSK_CA_PRIV_SIZE);
return tcp_register_congestion_control(&tcp_lp);
}
static void __exit tcp_lp_unregister(void)
{
tcp_unregister_congestion_control(&tcp_lp);
}
module_init(tcp_lp_register);
module_exit(tcp_lp_unregister);
MODULE_AUTHOR("Wong Hoi Sing Edison, Hung Hing Lun Mike");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Low Priority");
| linux-master | net/ipv4/tcp_lp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* PF_INET protocol family socket handler.
*
* Authors: Ross Biro
* Fred N. van Kempen, <[email protected]>
* Florian La Roche, <[email protected]>
* Alan Cox, <[email protected]>
*
* Changes (see also sock.c)
*
* piggy,
* Karl Knutson : Socket protocol table
* A.N.Kuznetsov : Socket death error in accept().
* John Richardson : Fix non blocking error in connect()
* so sockets that fail to connect
* don't return -EINPROGRESS.
* Alan Cox : Asynchronous I/O support
* Alan Cox : Keep correct socket pointer on sock
* structures
* when accept() ed
* Alan Cox : Semantics of SO_LINGER aren't state
* moved to close when you look carefully.
* With this fixed and the accept bug fixed
* some RPC stuff seems happier.
* Niibe Yutaka : 4.4BSD style write async I/O
* Alan Cox,
* Tony Gale : Fixed reuse semantics.
* Alan Cox : bind() shouldn't abort existing but dead
* sockets. Stops FTP netin:.. I hope.
* Alan Cox : bind() works correctly for RAW sockets.
* Note that FreeBSD at least was broken
* in this respect so be careful with
* compatibility tests...
* Alan Cox : routing cache support
* Alan Cox : memzero the socket structure for
* compactness.
* Matt Day : nonblock connect error handler
* Alan Cox : Allow large numbers of pending sockets
* (eg for big web sites), but only if
* specifically application requested.
* Alan Cox : New buffering throughout IP. Used
* dumbly.
* Alan Cox : New buffering now used smartly.
* Alan Cox : BSD rather than common sense
* interpretation of listen.
* Germano Caronni : Assorted small races.
* Alan Cox : sendmsg/recvmsg basic support.
* Alan Cox : Only sendmsg/recvmsg now supported.
* Alan Cox : Locked down bind (see security list).
* Alan Cox : Loosened bind a little.
* Mike McLagan : ADD/DEL DLCI Ioctls
* Willy Konynenberg : Transparent proxying support.
* David S. Miller : New socket lookup architecture.
* Some other random speedups.
* Cyrus Durgin : Cleaned up file for kmod hacks.
* Andi Kleen : Fix inet_stream_connect TCP race.
*/
#define pr_fmt(fmt) "IPv4: " fmt
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#include <net/l3mdev.h>
#include <net/compat.h>
#include <trace/events/sock.h>
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
*/
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
/* New destruction routine */
void inet_sock_destruct(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
__skb_queue_purge(&sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_error_queue);
sk_mem_reclaim_final(sk);
if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
pr_err("Attempt to release TCP socket in state %d %p\n",
sk->sk_state, sk);
return;
}
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive inet socket %p\n", sk);
return;
}
WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
WARN_ON_ONCE(sk->sk_wmem_queued);
WARN_ON_ONCE(sk_forward_alloc_get(sk));
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
}
EXPORT_SYMBOL(inet_sock_destruct);
/*
* The routines beyond this point handle the behaviour of an AF_INET
* socket object. Mostly it punts to the subprotocols of IP to do
* the work.
*/
/*
* Automatically bind an unbound socket.
*/
static int inet_autobind(struct sock *sk)
{
struct inet_sock *inet;
/* We may need to bind the socket. */
lock_sock(sk);
inet = inet_sk(sk);
if (!inet->inet_num) {
if (sk->sk_prot->get_port(sk, 0)) {
release_sock(sk);
return -EAGAIN;
}
inet->inet_sport = htons(inet->inet_num);
}
release_sock(sk);
return 0;
}
int __inet_listen_sk(struct sock *sk, int backlog)
{
unsigned char old_state = sk->sk_state;
int err, tcp_fastopen;
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
return -EINVAL;
WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
if (old_state != TCP_LISTEN) {
/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
* Note that only TCP sockets (SOCK_STREAM) will reach here.
* Also fastopen backlog may already been set via the option
* because the socket was in TCP_LISTEN state previously but
* was shutdown() rather than close().
*/
tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
(tcp_fastopen & TFO_SERVER_ENABLE) &&
!inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
fastopen_queue_tune(sk, backlog);
tcp_fastopen_init_key_once(sock_net(sk));
}
err = inet_csk_listen_start(sk);
if (err)
return err;
tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
}
return 0;
}
/*
* Move a socket into listening state.
*/
int inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = -EINVAL;
lock_sock(sk);
if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
goto out;
err = __inet_listen_sk(sk, backlog);
out:
release_sock(sk);
return err;
}
EXPORT_SYMBOL(inet_listen);
/*
* Create an inet socket.
*/
static int inet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct inet_protosw *answer;
struct inet_sock *inet;
struct proto *answer_prot;
unsigned char answer_flags;
int try_loading_module = 0;
int err;
if (protocol < 0 || protocol >= IPPROTO_MAX)
return -EINVAL;
sock->state = SS_UNCONNECTED;
/* Look for the requested type/protocol pair. */
lookup_protocol:
err = -ESOCKTNOSUPPORT;
rcu_read_lock();
list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
err = 0;
/* Check the non-wild match. */
if (protocol == answer->protocol) {
if (protocol != IPPROTO_IP)
break;
} else {
/* Check for the two wild cases. */
if (IPPROTO_IP == protocol) {
protocol = answer->protocol;
break;
}
if (IPPROTO_IP == answer->protocol)
break;
}
err = -EPROTONOSUPPORT;
}
if (unlikely(err)) {
if (try_loading_module < 2) {
rcu_read_unlock();
/*
* Be more specific, e.g. net-pf-2-proto-132-type-1
* (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
*/
if (++try_loading_module == 1)
request_module("net-pf-%d-proto-%d-type-%d",
PF_INET, protocol, sock->type);
/*
* Fall back to generic, e.g. net-pf-2-proto-132
* (net-pf-PF_INET-proto-IPPROTO_SCTP)
*/
else
request_module("net-pf-%d-proto-%d",
PF_INET, protocol);
goto lookup_protocol;
} else
goto out_rcu_unlock;
}
err = -EPERM;
if (sock->type == SOCK_RAW && !kern &&
!ns_capable(net->user_ns, CAP_NET_RAW))
goto out_rcu_unlock;
sock->ops = answer->ops;
answer_prot = answer->prot;
answer_flags = answer->flags;
rcu_read_unlock();
WARN_ON(!answer_prot->slab);
err = -ENOMEM;
sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
if (!sk)
goto out;
err = 0;
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = SK_CAN_REUSE;
inet = inet_sk(sk);
inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
inet_clear_bit(NODEFRAG, sk);
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
inet_set_bit(HDRINCL, sk);
}
if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;
atomic_set(&inet->inet_id, 0);
sock_init_data(sock, sk);
sk->sk_destruct = inet_sock_destruct;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
inet->uc_ttl = -1;
inet_set_bit(MC_LOOP, sk);
inet->mc_ttl = 1;
inet_set_bit(MC_ALL, sk);
inet->mc_index = 0;
inet->mc_list = NULL;
inet->rcv_tos = 0;
if (inet->inet_num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically
* shares.
*/
inet->inet_sport = htons(inet->inet_num);
/* Add to protocol hash chains. */
err = sk->sk_prot->hash(sk);
if (err) {
sk_common_release(sk);
goto out;
}
}
if (sk->sk_prot->init) {
err = sk->sk_prot->init(sk);
if (err) {
sk_common_release(sk);
goto out;
}
}
if (!kern) {
err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
if (err) {
sk_common_release(sk);
goto out;
}
}
out:
return err;
out_rcu_unlock:
rcu_read_unlock();
goto out;
}
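/*
 * Illustrative sketch, not part of af_inet.c: the aliases a loadable
 * protocol module would declare, in its own source file, so that the
 * request_module() calls in inet_create() above can auto-load it. The
 * numbers follow the SCTP example from the comment there (PF_INET = 2,
 * IPPROTO_SCTP = 132, SOCK_STREAM = 1); a module may provide the specific
 * alias, the generic one, or both.
 */
MODULE_ALIAS("net-pf-2-proto-132-type-1");	/* ...-proto-IPPROTO_SCTP-type-SOCK_STREAM */
MODULE_ALIAS("net-pf-2-proto-132");		/* net-pf-PF_INET-proto-IPPROTO_SCTP */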
/*
* The peer socket should always be NULL (or else). When we call this
* function we are destroying the object and from then on nobody
* should refer to it.
*/
int inet_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
long timeout;
if (!sk->sk_kern_sock)
BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);
/* Applications forget to leave groups before exiting */
ip_mc_drop_socket(sk);
/* If linger is set, we don't return until the close
* is complete. Otherwise we return immediately. The
* actually closing is done the same either way.
*
* If the close is due to the process exiting, we never
* linger..
*/
timeout = 0;
if (sock_flag(sk, SOCK_LINGER) &&
!(current->flags & PF_EXITING))
timeout = sk->sk_lingertime;
sk->sk_prot->close(sk, timeout);
sock->sk = NULL;
}
return 0;
}
EXPORT_SYMBOL(inet_release);
int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
u32 flags = BIND_WITH_LOCK;
int err;
/* If the socket has its own bind function then use it. (RAW) */
if (sk->sk_prot->bind) {
return sk->sk_prot->bind(sk, uaddr, addr_len);
}
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
/* BPF prog is run before any checks are done so that if the prog
* changes context in a wrong way it will be caught.
*/
err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
CGROUP_INET4_BIND, &flags);
if (err)
return err;
return __inet_bind(sk, uaddr, addr_len, flags);
}
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
return inet_bind_sk(sock->sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_bind);
int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
u32 flags)
{
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
unsigned short snum;
int chk_addr_ret;
u32 tb_id = RT_TABLE_LOCAL;
int err;
if (addr->sin_family != AF_INET) {
/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
* only if s_addr is INADDR_ANY.
*/
err = -EAFNOSUPPORT;
if (addr->sin_family != AF_UNSPEC ||
addr->sin_addr.s_addr != htonl(INADDR_ANY))
goto out;
}
tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
	/* Not specified by any standard per se, however it breaks too
* many applications when removed. It is unfortunate since
* allowing applications to make a non-local bind solves
* several problems with systems using dynamic addressing.
* (ie. your servers still start up even if your ISDN link
* is temporarily down)
*/
err = -EADDRNOTAVAIL;
if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
chk_addr_ret))
goto out;
snum = ntohs(addr->sin_port);
err = -EACCES;
if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
snum && inet_port_requires_bind_service(net, snum) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
goto out;
/* We keep a pair of addresses. rcv_saddr is the one
* used by hash lookups, and saddr is used for transmit.
*
* In the BSD API these are the same except where it
* would be illegal to use them (multicast/broadcast) in
* which case the sending device address is used.
*/
if (flags & BIND_WITH_LOCK)
lock_sock(sk);
/* Check these errors (active socket, double bind). */
err = -EINVAL;
if (sk->sk_state != TCP_CLOSE || inet->inet_num)
goto out_release_sock;
inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
(flags & BIND_FORCE_ADDRESS_NO_PORT))) {
err = sk->sk_prot->get_port(sk, snum);
if (err) {
inet->inet_saddr = inet->inet_rcv_saddr = 0;
goto out_release_sock;
}
if (!(flags & BIND_FROM_BPF)) {
err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
if (err) {
inet->inet_saddr = inet->inet_rcv_saddr = 0;
if (sk->sk_prot->put_port)
sk->sk_prot->put_port(sk);
goto out_release_sock;
}
}
}
if (inet->inet_rcv_saddr)
sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->inet_sport = htons(inet->inet_num);
inet->inet_daddr = 0;
inet->inet_dport = 0;
sk_dst_reset(sk);
err = 0;
out_release_sock:
if (flags & BIND_WITH_LOCK)
release_sock(sk);
out:
return err;
}
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
const struct proto *prot;
int err;
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
/* IPV6_ADDRFORM can change sk->sk_prot under us. */
prot = READ_ONCE(sk->sk_prot);
if (uaddr->sa_family == AF_UNSPEC)
return prot->disconnect(sk, flags);
if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
err = prot->pre_connect(sk, uaddr, addr_len);
if (err)
return err;
}
if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
return -EAGAIN;
return prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending += writebias;
sk->sk_wait_pending++;
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
* Connect() does not allow to get error notifications
* without closing the socket.
*/
while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
release_sock(sk);
timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
}
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending -= writebias;
sk->sk_wait_pending--;
return timeo;
}
/*
* Connect to a remote host. There is regrettably still a little
* TCP 'magic' in here.
*/
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags, int is_sendmsg)
{
struct sock *sk = sock->sk;
int err;
long timeo;
/*
* uaddr can be NULL and addr_len can be 0 if:
* sk is a TCP fastopen active socket and
* TCP_FASTOPEN_CONNECT sockopt is set and
* we already have a valid cookie for this socket.
* In this case, user can call write() after connect().
* write() will invoke tcp_sendmsg_fastopen() which calls
* __inet_stream_connect().
*/
if (uaddr) {
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
if (uaddr->sa_family == AF_UNSPEC) {
err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
goto out;
}
}
switch (sock->state) {
default:
err = -EINVAL;
goto out;
case SS_CONNECTED:
err = -EISCONN;
goto out;
case SS_CONNECTING:
if (inet_test_bit(DEFER_CONNECT, sk))
err = is_sendmsg ? -EINPROGRESS : -EISCONN;
else
err = -EALREADY;
/* Fall out of switch with err, set for this state */
break;
case SS_UNCONNECTED:
err = -EISCONN;
if (sk->sk_state != TCP_CLOSE)
goto out;
if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
if (err)
goto out;
}
err = sk->sk_prot->connect(sk, uaddr, addr_len);
if (err < 0)
goto out;
sock->state = SS_CONNECTING;
if (!err && inet_test_bit(DEFER_CONNECT, sk))
goto out;
/* Just entered SS_CONNECTING state; the only
* difference is that return value in non-blocking
* case is EINPROGRESS, rather than EALREADY.
*/
err = -EINPROGRESS;
break;
}
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
tcp_sk(sk)->fastopen_req &&
tcp_sk(sk)->fastopen_req->data ? 1 : 0;
/* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
goto out;
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
}
/* Connection was closed by RST, timeout, ICMP error
* or another process disconnected us.
*/
if (sk->sk_state == TCP_CLOSE)
goto sock_error;
/* sk->sk_err may be not zero now, if RECVERR was ordered by user
* and error was received after socket entered established state.
* Hence, it is handled normally after connect() return successfully.
*/
sock->state = SS_CONNECTED;
err = 0;
out:
return err;
sock_error:
err = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
if (sk->sk_prot->disconnect(sk, flags))
sock->state = SS_DISCONNECTING;
goto out;
}
EXPORT_SYMBOL(__inet_stream_connect);
int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
int err;
lock_sock(sock->sk);
err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
release_sock(sock->sk);
return err;
}
EXPORT_SYMBOL(inet_stream_connect);
void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
{
sock_rps_record_flow(newsk);
WARN_ON(!((1 << newsk->sk_state) &
(TCPF_ESTABLISHED | TCPF_SYN_RECV |
TCPF_CLOSE_WAIT | TCPF_CLOSE)));
if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
sock_graft(newsk, newsock);
newsock->state = SS_CONNECTED;
}
/*
* Accept a pending connection. The TCP layer now gives BSD semantics.
*/
int inet_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk1 = sock->sk, *sk2;
int err = -EINVAL;
/* IPV6_ADDRFORM can change sk->sk_prot under us. */
sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, flags, &err, kern);
if (!sk2)
return err;
lock_sock(sk2);
__inet_accept(sock, newsock, sk2);
release_sock(sk2);
return 0;
}
EXPORT_SYMBOL(inet_accept);
/*
* This does both peername and sockname.
*/
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
sin->sin_family = AF_INET;
lock_sock(sk);
if (peer) {
if (!inet->inet_dport ||
(((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
peer == 1)) {
release_sock(sk);
return -ENOTCONN;
}
sin->sin_port = inet->inet_dport;
sin->sin_addr.s_addr = inet->inet_daddr;
BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
CGROUP_INET4_GETPEERNAME);
} else {
__be32 addr = inet->inet_rcv_saddr;
if (!addr)
addr = inet->inet_saddr;
sin->sin_port = inet->inet_sport;
sin->sin_addr.s_addr = addr;
BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
CGROUP_INET4_GETSOCKNAME);
}
release_sock(sk);
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
return sizeof(*sin);
}
EXPORT_SYMBOL(inet_getname);
int inet_send_prepare(struct sock *sk)
{
sock_rps_record_flow(sk);
/* We may need to bind the socket. */
if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
inet_autobind(sk))
return -EAGAIN;
return 0;
}
EXPORT_SYMBOL_GPL(inet_send_prepare);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
if (unlikely(inet_send_prepare(sk)))
return -EAGAIN;
return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg,
sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);
void inet_splice_eof(struct socket *sock)
{
const struct proto *prot;
struct sock *sk = sock->sk;
if (unlikely(inet_send_prepare(sk)))
return;
/* IPV6_ADDRFORM can change sk->sk_prot under us. */
prot = READ_ONCE(sk->sk_prot);
if (prot->splice_eof)
prot->splice_eof(sock);
}
EXPORT_SYMBOL_GPL(inet_splice_eof);
INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
size_t, int, int *));
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
int addr_len = 0;
int err;
if (likely(!(flags & MSG_ERRQUEUE)))
sock_rps_record_flow(sk);
err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg,
sk, msg, size, flags, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
return err;
}
EXPORT_SYMBOL(inet_recvmsg);
int inet_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
/* This should really check to make sure
* the socket is a TCP socket. (WHY AC...)
*/
	how++; /* maps 0->1, 1->2 and 2->3; this has the advantage of
		  making bit 1 the receive flag and bit 2 the send flag. */
if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */
return -EINVAL;
lock_sock(sk);
if (sock->state == SS_CONNECTING) {
if ((1 << sk->sk_state) &
(TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
sock->state = SS_DISCONNECTING;
else
sock->state = SS_CONNECTED;
}
switch (sk->sk_state) {
case TCP_CLOSE:
err = -ENOTCONN;
/* Hack to wake up other listeners, who can poll for
EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
fallthrough;
default:
WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
if (sk->sk_prot->shutdown)
sk->sk_prot->shutdown(sk, how);
break;
/* Remaining two branches are temporary solution for missing
* close() in multithreaded environment. It is _not_ a good idea,
* but we have no choice until close() is repaired at VFS level.
*/
case TCP_LISTEN:
if (!(how & RCV_SHUTDOWN))
break;
fallthrough;
case TCP_SYN_SENT:
err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
break;
}
/* Wake up anyone sleeping in poll. */
sk->sk_state_change(sk);
release_sock(sk);
return err;
}
EXPORT_SYMBOL(inet_shutdown);
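/*
 * Illustrative sketch, not part of af_inet.c: the "how++" trick in
 * inet_shutdown() spelled out. SHUT_RD/SHUT_WR/SHUT_RDWR are the userspace
 * values 0/1/2; adding one turns them into the kernel's RCV_SHUTDOWN (bit 0)
 * and SEND_SHUTDOWN (bit 1) masks. The helper name is hypothetical.
 */
static inline int example_shutdown_mask(int how)
{
	switch (how) {
	case SHUT_RD:	return RCV_SHUTDOWN;			/* 0 -> 1 */
	case SHUT_WR:	return SEND_SHUTDOWN;			/* 1 -> 2 */
	case SHUT_RDWR:	return RCV_SHUTDOWN | SEND_SHUTDOWN;	/* 2 -> 3 */
	default:	return -EINVAL;
	}
}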
/*
* ioctl() calls you can issue on an INET socket. Most of these are
* device configuration and stuff and very rarely used. Some ioctls
* pass on to the socket itself.
*
 * NOTE: I like the idea of a module for the config stuff. i.e. ifconfig
 * loads the devconfigure module, which does its configuring and is then
 * unloaded. There's a good 20K of config code hanging around the kernel.
*/
int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err = 0;
struct net *net = sock_net(sk);
void __user *p = (void __user *)arg;
struct ifreq ifr;
struct rtentry rt;
switch (cmd) {
case SIOCADDRT:
case SIOCDELRT:
if (copy_from_user(&rt, p, sizeof(struct rtentry)))
return -EFAULT;
err = ip_rt_ioctl(net, cmd, &rt);
break;
case SIOCRTMSG:
err = -EINVAL;
break;
case SIOCDARP:
case SIOCGARP:
case SIOCSARP:
err = arp_ioctl(net, cmd, (void __user *)arg);
break;
case SIOCGIFADDR:
case SIOCGIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCGIFDSTADDR:
case SIOCGIFPFLAGS:
if (get_user_ifreq(&ifr, NULL, p))
return -EFAULT;
err = devinet_ioctl(net, cmd, &ifr);
if (!err && put_user_ifreq(&ifr, p))
err = -EFAULT;
break;
case SIOCSIFADDR:
case SIOCSIFBRDADDR:
case SIOCSIFNETMASK:
case SIOCSIFDSTADDR:
case SIOCSIFPFLAGS:
case SIOCSIFFLAGS:
if (get_user_ifreq(&ifr, NULL, p))
return -EFAULT;
err = devinet_ioctl(net, cmd, &ifr);
break;
default:
if (sk->sk_prot->ioctl)
err = sk_ioctl(sk, cmd, (void __user *)arg);
else
err = -ENOIOCTLCMD;
break;
}
return err;
}
EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
struct compat_rtentry __user *ur)
{
compat_uptr_t rtdev;
struct rtentry rt;
if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
3 * sizeof(struct sockaddr)) ||
get_user(rt.rt_flags, &ur->rt_flags) ||
get_user(rt.rt_metric, &ur->rt_metric) ||
get_user(rt.rt_mtu, &ur->rt_mtu) ||
get_user(rt.rt_window, &ur->rt_window) ||
get_user(rt.rt_irtt, &ur->rt_irtt) ||
get_user(rtdev, &ur->rt_dev))
return -EFAULT;
rt.rt_dev = compat_ptr(rtdev);
return ip_rt_ioctl(sock_net(sk), cmd, &rt);
}
static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
void __user *argp = compat_ptr(arg);
struct sock *sk = sock->sk;
switch (cmd) {
case SIOCADDRT:
case SIOCDELRT:
return inet_compat_routing_ioctl(sk, cmd, argp);
default:
if (!sk->sk_prot->compat_ioctl)
return -ENOIOCTLCMD;
return sk->sk_prot->compat_ioctl(sk, cmd, arg);
}
}
#endif /* CONFIG_COMPAT */
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
.poll = tcp_poll,
.ioctl = inet_ioctl,
.gettstamp = sock_gettstamp,
.listen = inet_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
#ifdef CONFIG_MMU
.mmap = tcp_mmap,
#endif
.splice_eof = inet_splice_eof,
.splice_read = tcp_splice_read,
.read_sock = tcp_read_sock,
.read_skb = tcp_read_skb,
.sendmsg_locked = tcp_sendmsg_locked,
.peek_len = tcp_peek_len,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
#endif
.set_rcvlowat = tcp_set_rcvlowat,
};
EXPORT_SYMBOL(inet_stream_ops);
const struct proto_ops inet_dgram_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet_getname,
.poll = udp_poll,
.ioctl = inet_ioctl,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.read_skb = udp_read_skb,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = inet_splice_eof,
.set_peek_off = sk_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);
/*
* For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
* udp_poll
*/
static const struct proto_ops inet_sockraw_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet_getname,
.poll = datagram_poll,
.ioctl = inet_ioctl,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = inet_splice_eof,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
#endif
};
static const struct net_proto_family inet_family_ops = {
.family = PF_INET,
.create = inet_create,
.owner = THIS_MODULE,
};
/* Upon startup we insert all the elements in inetsw_array[] into
* the linked list inetsw.
*/
static struct inet_protosw inetsw_array[] =
{
{
.type = SOCK_STREAM,
.protocol = IPPROTO_TCP,
.prot = &tcp_prot,
.ops = &inet_stream_ops,
.flags = INET_PROTOSW_PERMANENT |
INET_PROTOSW_ICSK,
},
{
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udp_prot,
.ops = &inet_dgram_ops,
.flags = INET_PROTOSW_PERMANENT,
},
{
.type = SOCK_DGRAM,
.protocol = IPPROTO_ICMP,
.prot = &ping_prot,
.ops = &inet_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
},
{
.type = SOCK_RAW,
.protocol = IPPROTO_IP, /* wild card */
.prot = &raw_prot,
.ops = &inet_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
}
};
#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
void inet_register_protosw(struct inet_protosw *p)
{
struct list_head *lh;
struct inet_protosw *answer;
int protocol = p->protocol;
struct list_head *last_perm;
spin_lock_bh(&inetsw_lock);
if (p->type >= SOCK_MAX)
goto out_illegal;
/* If we are trying to override a permanent protocol, bail. */
last_perm = &inetsw[p->type];
list_for_each(lh, &inetsw[p->type]) {
answer = list_entry(lh, struct inet_protosw, list);
/* Check only the non-wild match. */
if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
break;
if (protocol == answer->protocol)
goto out_permanent;
last_perm = lh;
}
/* Add the new entry after the last permanent entry if any, so that
* the new entry does not override a permanent entry when matched with
* a wild-card protocol. But it is allowed to override any existing
* non-permanent entry. This means that when we remove this entry, the
* system automatically returns to the old behavior.
*/
list_add_rcu(&p->list, last_perm);
out:
spin_unlock_bh(&inetsw_lock);
return;
out_permanent:
pr_err("Attempt to override permanent protocol %d\n", protocol);
goto out;
out_illegal:
pr_err("Ignoring attempt to register invalid socket type %d\n",
p->type);
goto out;
}
EXPORT_SYMBOL(inet_register_protosw);
void inet_unregister_protosw(struct inet_protosw *p)
{
if (INET_PROTOSW_PERMANENT & p->flags) {
pr_err("Attempt to unregister permanent protocol %d\n",
p->protocol);
} else {
spin_lock_bh(&inetsw_lock);
list_del_rcu(&p->list);
spin_unlock_bh(&inetsw_lock);
synchronize_net();
}
}
EXPORT_SYMBOL(inet_unregister_protosw);
static int inet_sk_reselect_saddr(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
__be32 old_saddr = inet->inet_saddr;
__be32 daddr = inet->inet_daddr;
struct flowi4 *fl4;
struct rtable *rt;
__be32 new_saddr;
struct ip_options_rcu *inet_opt;
int err;
inet_opt = rcu_dereference_protected(inet->inet_opt,
lockdep_sock_is_held(sk));
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
/* Query new route. */
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
sk->sk_protocol, inet->inet_sport,
inet->inet_dport, sk);
if (IS_ERR(rt))
return PTR_ERR(rt);
new_saddr = fl4->saddr;
if (new_saddr == old_saddr) {
sk_setup_caps(sk, &rt->dst);
return 0;
}
err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
if (err) {
ip_rt_put(rt);
return err;
}
sk_setup_caps(sk, &rt->dst);
if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
__func__, &old_saddr, &new_saddr);
}
/*
* XXX The only one ugly spot where we need to
* XXX really change the sockets identity after
* XXX it has entered the hashes. -DaveM
*
* Besides that, it does not check for connection
* uniqueness. Wait for troubles.
*/
return __sk_prot_rehash(sk);
}
int inet_sk_rebuild_header(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
__be32 daddr;
struct ip_options_rcu *inet_opt;
struct flowi4 *fl4;
int err;
/* Route is OK, nothing to do. */
if (rt)
return 0;
/* Reroute. */
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
daddr = inet->inet_daddr;
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
rcu_read_unlock();
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
sk_setup_caps(sk, &rt->dst);
} else {
err = PTR_ERR(rt);
/* Routing failed... */
sk->sk_route_caps = 0;
/*
		 * Other protocols have to map their equivalent state to TCP_SYN_SENT.
* DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
*/
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
sk->sk_state != TCP_SYN_SENT ||
(sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
(err = inet_sk_reselect_saddr(sk)) != 0)
WRITE_ONCE(sk->sk_err_soft, -err);
}
return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);
void inet_sk_set_state(struct sock *sk, int state)
{
trace_inet_sock_set_state(sk, sk->sk_state, state);
sk->sk_state = state;
}
EXPORT_SYMBOL(inet_sk_set_state);
void inet_sk_state_store(struct sock *sk, int newstate)
{
trace_inet_sock_set_state(sk, sk->sk_state, newstate);
smp_store_release(&sk->sk_state, newstate);
}
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
bool udpfrag = false, fixedid = false, gso_partial, encap;
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
unsigned int offset = 0;
struct iphdr *iph;
int proto, tot_len;
int nhoff;
int ihl;
int id;
skb_reset_network_header(skb);
nhoff = skb_network_header(skb) - skb_mac_header(skb);
if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
goto out;
iph = ip_hdr(skb);
ihl = iph->ihl * 4;
if (ihl < sizeof(*iph))
goto out;
id = ntohs(iph->id);
proto = iph->protocol;
/* Warning: after this point, iph might be no longer valid */
if (unlikely(!pskb_may_pull(skb, ihl)))
goto out;
__skb_pull(skb, ihl);
encap = SKB_GSO_CB(skb)->encap_level > 0;
if (encap)
features &= skb->dev->hw_enc_features;
SKB_GSO_CB(skb)->encap_level += ihl;
skb_reset_transport_header(skb);
segs = ERR_PTR(-EPROTONOSUPPORT);
if (!skb->encapsulation || encap) {
udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
/* fixed ID is invalid if DF bit is not set */
if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
goto out;
}
ops = rcu_dereference(inet_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment)) {
segs = ops->callbacks.gso_segment(skb, features);
if (!segs)
skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
}
if (IS_ERR_OR_NULL(segs))
goto out;
gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
skb = segs;
do {
iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
if (udpfrag) {
iph->frag_off = htons(offset >> 3);
if (skb->next)
iph->frag_off |= htons(IP_MF);
offset += skb->len - nhoff - ihl;
tot_len = skb->len - nhoff;
} else if (skb_is_gso(skb)) {
if (!fixedid) {
iph->id = htons(id);
id += skb_shinfo(skb)->gso_segs;
}
if (gso_partial)
tot_len = skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)iph;
else
tot_len = skb->len - nhoff;
} else {
if (!fixedid)
iph->id = htons(id++);
tot_len = skb->len - nhoff;
}
iph->tot_len = htons(tot_len);
ip_send_check(iph);
if (encap)
skb_reset_inner_headers(skb);
skb->network_header = (u8 *)iph - skb->head;
skb_reset_mac_len(skb);
} while ((skb = skb->next));
out:
return segs;
}
static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
return ERR_PTR(-EINVAL);
return inet_gso_segment(skb, features);
}
struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
{
const struct net_offload *ops;
struct sk_buff *pp = NULL;
const struct iphdr *iph;
struct sk_buff *p;
unsigned int hlen;
unsigned int off;
unsigned int id;
int flush = 1;
int proto;
off = skb_gro_offset(skb);
hlen = off + sizeof(*iph);
iph = skb_gro_header(skb, hlen, off);
if (unlikely(!iph))
goto out;
proto = iph->protocol;
ops = rcu_dereference(inet_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
goto out;
if (*(u8 *)iph != 0x45)
goto out;
if (ip_is_fragment(iph))
goto out;
if (unlikely(ip_fast_csum((u8 *)iph, 5)))
goto out;
NAPI_GRO_CB(skb)->proto = proto;
id = ntohl(*(__be32 *)&iph->id);
flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
id >>= 16;
list_for_each_entry(p, head, list) {
struct iphdr *iph2;
u16 flush_id;
if (!NAPI_GRO_CB(p)->same_flow)
continue;
iph2 = (struct iphdr *)(p->data + off);
/* The above works because, with the exception of the top
* (inner most) layer, we only aggregate pkts with the same
* hdr length so all the hdrs we'll need to verify will start
* at the same offset.
*/
if ((iph->protocol ^ iph2->protocol) |
((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
/* All fields must match except length and checksum. */
NAPI_GRO_CB(p)->flush |=
(iph->ttl ^ iph2->ttl) |
(iph->tos ^ iph2->tos) |
((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
NAPI_GRO_CB(p)->flush |= flush;
		/* We need to store the IP ID check to be included later,
* when we can verify that this packet does in fact belong
* to a given flow.
*/
flush_id = (u16)(id - ntohs(iph2->id));
/* This bit of code makes it much easier for us to identify
* the cases where we are doing atomic vs non-atomic IP ID
* checks. Specifically an atomic check can return IP ID
* values 0 - 0xFFFF, while a non-atomic check can only
* return 0 or 0xFFFF.
*/
if (!NAPI_GRO_CB(p)->is_atomic ||
!(iph->frag_off & htons(IP_DF))) {
flush_id ^= NAPI_GRO_CB(p)->count;
flush_id = flush_id ? 0xFFFF : 0;
}
/* If the previous IP ID value was based on an atomic
* datagram we can overwrite the value and ignore it.
*/
if (NAPI_GRO_CB(skb)->is_atomic)
NAPI_GRO_CB(p)->flush_id = flush_id;
else
NAPI_GRO_CB(p)->flush_id |= flush_id;
}
NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
NAPI_GRO_CB(skb)->flush |= flush;
skb_set_network_header(skb, off);
/* The above will be needed by the transport layer if there is one
* immediately following this IP hdr.
*/
/* Note : No need to call skb_gro_postpull_rcsum() here,
* as we already checked checksum over ipv4 header was 0
*/
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
ops->callbacks.gro_receive, head, skb);
out:
skb_gro_flush_final(skb, pp, flush);
return pp;
}
static struct sk_buff *ipip_gro_receive(struct list_head *head,
struct sk_buff *skb)
{
if (NAPI_GRO_CB(skb)->encap_mark) {
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
NAPI_GRO_CB(skb)->encap_mark = 1;
return inet_gro_receive(head, skb);
}
#define SECONDS_PER_DAY 86400
/* inet_current_timestamp - Return IP network timestamp
*
* Return milliseconds since midnight in network byte order.
*/
__be32 inet_current_timestamp(void)
{
u32 secs;
u32 msecs;
struct timespec64 ts;
ktime_get_real_ts64(&ts);
/* Get secs since midnight. */
(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
/* Convert to msecs. */
msecs = secs * MSEC_PER_SEC;
/* Convert nsec to msec. */
msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
/* Convert to network byte order. */
return htonl(msecs);
}
EXPORT_SYMBOL(inet_current_timestamp);
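/*
 * Illustrative sketch, not part of af_inet.c: the same milliseconds-since-
 * midnight computation as above, for a caller-supplied instant, handy for
 * checking an IP timestamp option value by hand. At 01:02:03.500 UTC the
 * result is (1*3600 + 2*60 + 3) * 1000 + 500 = 3723500, sent as
 * htonl(3723500). The helper name is made up.
 */
static __be32 example_ip_timestamp(time64_t sec_since_epoch, u32 nsec)
{
	u32 secs, msecs;

	(void)div_u64_rem(sec_since_epoch, SECONDS_PER_DAY, &secs);
	msecs = secs * MSEC_PER_SEC + nsec / NSEC_PER_MSEC;
	return htonl(msecs);
}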
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
if (sk->sk_family == AF_INET)
return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
return -EINVAL;
}
int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
const struct net_offload *ops;
__be16 totlen = iph->tot_len;
int proto = iph->protocol;
int err = -ENOSYS;
if (skb->encapsulation) {
skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
skb_set_inner_network_header(skb, nhoff);
}
iph_set_totlen(iph, skb->len - nhoff);
csum_replace2(&iph->check, totlen, iph->tot_len);
ops = rcu_dereference(inet_offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
goto out;
/* Only need to add sizeof(*iph) to get to the next hdr below
* because any hdr with option will have been flushed in
* inet_gro_receive().
*/
err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
tcp4_gro_complete, udp4_gro_complete,
skb, nhoff + sizeof(*iph));
out:
return err;
}
static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
{
skb->encapsulation = 1;
skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
return inet_gro_complete(skb, nhoff);
}
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
unsigned short type, unsigned char protocol,
struct net *net)
{
struct socket *sock;
int rc = sock_create_kern(net, family, type, protocol, &sock);
if (rc == 0) {
*sk = sock->sk;
(*sk)->sk_allocation = GFP_ATOMIC;
(*sk)->sk_use_task_frag = false;
/*
* Unhash it so that IP input processing does not even see it,
* we do not wish this socket to see incoming packets.
*/
(*sk)->sk_prot->unhash(*sk);
}
return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
unsigned long res = 0;
int i;
for_each_possible_cpu(i)
res += snmp_get_cpu_field(mib, i, offt);
return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
size_t syncp_offset)
{
void *bhptr;
struct u64_stats_sync *syncp;
u64 v;
unsigned int start;
bhptr = per_cpu_ptr(mib, cpu);
syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
do {
start = u64_stats_fetch_begin(syncp);
v = *(((u64 *)bhptr) + offt);
} while (u64_stats_fetch_retry(syncp, start));
return v;
}
EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
u64 res = 0;
int cpu;
for_each_possible_cpu(cpu) {
res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
}
return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif
#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
};
#endif
static const struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.no_policy = 1,
.icmp_strict_tag_validation = 1,
};
static const struct net_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
.no_policy = 1,
};
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.err_handler = icmp_err,
.no_policy = 1,
};
static __net_init int ipv4_mib_init_net(struct net *net)
{
int i;
net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
if (!net->mib.tcp_statistics)
goto err_tcp_mib;
net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
if (!net->mib.ip_statistics)
goto err_ip_mib;
for_each_possible_cpu(i) {
struct ipstats_mib *af_inet_stats;
af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
u64_stats_init(&af_inet_stats->syncp);
}
net->mib.net_statistics = alloc_percpu(struct linux_mib);
if (!net->mib.net_statistics)
goto err_net_mib;
net->mib.udp_statistics = alloc_percpu(struct udp_mib);
if (!net->mib.udp_statistics)
goto err_udp_mib;
net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
if (!net->mib.udplite_statistics)
goto err_udplite_mib;
net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
if (!net->mib.icmp_statistics)
goto err_icmp_mib;
net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
GFP_KERNEL);
if (!net->mib.icmpmsg_statistics)
goto err_icmpmsg_mib;
tcp_mib_init(net);
return 0;
err_icmpmsg_mib:
free_percpu(net->mib.icmp_statistics);
err_icmp_mib:
free_percpu(net->mib.udplite_statistics);
err_udplite_mib:
free_percpu(net->mib.udp_statistics);
err_udp_mib:
free_percpu(net->mib.net_statistics);
err_net_mib:
free_percpu(net->mib.ip_statistics);
err_ip_mib:
free_percpu(net->mib.tcp_statistics);
err_tcp_mib:
return -ENOMEM;
}
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
kfree(net->mib.icmpmsg_statistics);
free_percpu(net->mib.icmp_statistics);
free_percpu(net->mib.udplite_statistics);
free_percpu(net->mib.udp_statistics);
free_percpu(net->mib.net_statistics);
free_percpu(net->mib.ip_statistics);
free_percpu(net->mib.tcp_statistics);
#ifdef CONFIG_MPTCP
/* allocated on demand, see mptcp_init_sock() */
free_percpu(net->mib.mptcp_statistics);
#endif
}
static __net_initdata struct pernet_operations ipv4_mib_ops = {
.init = ipv4_mib_init_net,
.exit = ipv4_mib_exit_net,
};
static int __init init_ipv4_mibs(void)
{
return register_pernet_subsys(&ipv4_mib_ops);
}
static __net_init int inet_init_net(struct net *net)
{
/*
* Set defaults for local port range
*/
seqlock_init(&net->ipv4.ip_local_ports.lock);
net->ipv4.ip_local_ports.range[0] = 32768;
net->ipv4.ip_local_ports.range[1] = 60999;
seqlock_init(&net->ipv4.ping_group_range.lock);
/*
* Sane defaults - nobody may create ping sockets.
* Boot scripts should set this to distro-specific group.
*/
net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
/* Default values for sysctl-controlled parameters.
* We set them here, in case sysctl is not compiled.
*/
net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
net->ipv4.sysctl_ip_fwd_update_priority = 1;
net->ipv4.sysctl_ip_dynaddr = 0;
net->ipv4.sysctl_ip_early_demux = 1;
net->ipv4.sysctl_udp_early_demux = 1;
net->ipv4.sysctl_tcp_early_demux = 1;
net->ipv4.sysctl_nexthop_compat_mode = 1;
#ifdef CONFIG_SYSCTL
net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
#endif
/* Some igmp sysctl, whose values are always used */
net->ipv4.sysctl_igmp_max_memberships = 20;
net->ipv4.sysctl_igmp_max_msf = 10;
/* IGMP reports for link-local multicast groups are enabled by default */
net->ipv4.sysctl_igmp_llm_reports = 1;
net->ipv4.sysctl_igmp_qrv = 2;
net->ipv4.sysctl_fib_notify_on_flag_change = 0;
return 0;
}
static __net_initdata struct pernet_operations af_inet_ops = {
.init = inet_init_net,
};
static int __init init_inet_pernet_ops(void)
{
return register_pernet_subsys(&af_inet_ops);
}
static int ipv4_proc_init(void);
/*
* IP protocol layer initialiser
*/
static struct packet_offload ip_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IP),
.callbacks = {
.gso_segment = inet_gso_segment,
.gro_receive = inet_gro_receive,
.gro_complete = inet_gro_complete,
},
};
static const struct net_offload ipip_offload = {
.callbacks = {
.gso_segment = ipip_gso_segment,
.gro_receive = ipip_gro_receive,
.gro_complete = ipip_gro_complete,
},
};
static int __init ipip_offload_init(void)
{
return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
}
static int __init ipv4_offload_init(void)
{
/*
* Add offloads
*/
if (udpv4_offload_init() < 0)
pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
if (tcpv4_offload_init() < 0)
pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
if (ipip_offload_init() < 0)
pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
dev_add_offload(&ip_packet_offload);
return 0;
}
fs_initcall(ipv4_offload_init);
static struct packet_type ip_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_IP),
.func = ip_rcv,
.list_func = ip_list_rcv,
};
static int __init inet_init(void)
{
struct inet_protosw *q;
struct list_head *r;
int rc;
sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
raw_hashinfo_init(&raw_v4_hashinfo);
rc = proto_register(&tcp_prot, 1);
if (rc)
goto out;
rc = proto_register(&udp_prot, 1);
if (rc)
goto out_unregister_tcp_proto;
rc = proto_register(&raw_prot, 1);
if (rc)
goto out_unregister_udp_proto;
rc = proto_register(&ping_prot, 1);
if (rc)
goto out_unregister_raw_proto;
/*
* Tell SOCKET that we are alive...
*/
(void)sock_register(&inet_family_ops);
#ifdef CONFIG_SYSCTL
ip_static_sysctl_init();
#endif
/*
* Add all the base protocols.
*/
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
pr_crit("%s: Cannot add ICMP protocol\n", __func__);
if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
pr_crit("%s: Cannot add UDP protocol\n", __func__);
if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
pr_crit("%s: Cannot add IGMP protocol\n", __func__);
#endif
/* Register the socket-side information for inet_create. */
for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r);
for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
inet_register_protosw(q);
/*
* Set the ARP module up
*/
arp_init();
/*
* Set the IP module up
*/
ip_init();
/* Initialise per-cpu ipv4 mibs */
if (init_ipv4_mibs())
panic("%s: Cannot init ipv4 mibs\n", __func__);
/* Setup TCP slab cache for open requests. */
tcp_init();
/* Setup UDP memory threshold */
udp_init();
/* Add UDP-Lite (RFC 3828) */
udplite4_register();
raw_init();
ping_init();
/*
* Set the ICMP layer up
*/
if (icmp_init() < 0)
panic("Failed to create the ICMP control socket.\n");
/*
* Initialise the multicast router
*/
#if defined(CONFIG_IP_MROUTE)
if (ip_mr_init())
pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
#endif
if (init_inet_pernet_ops())
pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
ipv4_proc_init();
ipfrag_init();
dev_add_pack(&ip_packet_type);
ip_tunnel_core_init();
rc = 0;
out:
return rc;
out_unregister_raw_proto:
proto_unregister(&raw_prot);
out_unregister_udp_proto:
proto_unregister(&udp_prot);
out_unregister_tcp_proto:
proto_unregister(&tcp_prot);
goto out;
}
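/* inet_init() is registered as an fs_initcall below, so the core IPv4
 * stack is up before later initcall levels (such as the device
 * initcalls used by network drivers) start running.
 */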
fs_initcall(inet_init);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
int rc = 0;
if (raw_proc_init())
goto out_raw;
if (tcp4_proc_init())
goto out_tcp;
if (udp4_proc_init())
goto out_udp;
if (ping_proc_init())
goto out_ping;
if (ip_misc_proc_init())
goto out_misc;
out:
return rc;
out_misc:
ping_proc_exit();
out_ping:
udp4_proc_exit();
out_udp:
tcp4_proc_exit();
out_tcp:
raw_proc_exit();
out_raw:
rc = -ENOMEM;
goto out;
}
#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
return 0;
}
#endif /* CONFIG_PROC_FS */
| linux-master | net/ipv4/af_inet.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * tcp_diag.c Module for monitoring TCP transport protocol sockets.
*
* Authors: Alexey Kuznetsov, <[email protected]>
*/
#include <linux/module.h>
#include <linux/net.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <linux/tcp.h>
#include <net/netlink.h>
#include <net/tcp.h>
static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
void *_info)
{
struct tcp_info *info = _info;
if (inet_sk_state_load(sk) == TCP_LISTEN) {
r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
} else if (sk->sk_type == SOCK_STREAM) {
const struct tcp_sock *tp = tcp_sk(sk);
r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
READ_ONCE(tp->copied_seq), 0);
r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
}
if (info)
tcp_get_info(sk, info);
}
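/* tcp_diag_get_info() reports, for listening sockets, the current and
 * maximum accept backlog in idiag_rqueue/idiag_wqueue; for established
 * TCP sockets it reports the bytes not yet read by the application
 * (rcv_nxt - copied_seq) and the bytes not yet acknowledged by the peer
 * (write_seq - snd_una).
 */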
#ifdef CONFIG_TCP_MD5SIG
static void tcp_diag_md5sig_fill(struct tcp_diag_md5sig *info,
const struct tcp_md5sig_key *key)
{
info->tcpm_family = key->family;
info->tcpm_prefixlen = key->prefixlen;
info->tcpm_keylen = key->keylen;
memcpy(info->tcpm_key, key->key, key->keylen);
if (key->family == AF_INET)
info->tcpm_addr[0] = key->addr.a4.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
else if (key->family == AF_INET6)
memcpy(&info->tcpm_addr, &key->addr.a6,
sizeof(info->tcpm_addr));
#endif
}
static int tcp_diag_put_md5sig(struct sk_buff *skb,
const struct tcp_md5sig_info *md5sig)
{
const struct tcp_md5sig_key *key;
struct tcp_diag_md5sig *info;
struct nlattr *attr;
int md5sig_count = 0;
hlist_for_each_entry_rcu(key, &md5sig->head, node)
md5sig_count++;
if (md5sig_count == 0)
return 0;
attr = nla_reserve(skb, INET_DIAG_MD5SIG,
md5sig_count * sizeof(struct tcp_diag_md5sig));
if (!attr)
return -EMSGSIZE;
info = nla_data(attr);
memset(info, 0, md5sig_count * sizeof(struct tcp_diag_md5sig));
hlist_for_each_entry_rcu(key, &md5sig->head, node) {
tcp_diag_md5sig_fill(info++, key);
if (--md5sig_count == 0)
break;
}
return 0;
}
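/* tcp_diag_put_md5sig() walks the RCU-protected key list twice: once to
 * size the netlink attribute and once to fill it. The fill loop stops
 * after md5sig_count entries so that keys added concurrently cannot
 * overflow the space reserved by nla_reserve().
 */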
#endif
static int tcp_diag_put_ulp(struct sk_buff *skb, struct sock *sk,
const struct tcp_ulp_ops *ulp_ops)
{
struct nlattr *nest;
int err;
nest = nla_nest_start_noflag(skb, INET_DIAG_ULP_INFO);
if (!nest)
return -EMSGSIZE;
err = nla_put_string(skb, INET_ULP_INFO_NAME, ulp_ops->name);
if (err)
goto nla_failure;
if (ulp_ops->get_info)
err = ulp_ops->get_info(sk, skb);
if (err)
goto nla_failure;
nla_nest_end(skb, nest);
return 0;
nla_failure:
nla_nest_cancel(skb, nest);
return err;
}
static int tcp_diag_get_aux(struct sock *sk, bool net_admin,
struct sk_buff *skb)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int err = 0;
#ifdef CONFIG_TCP_MD5SIG
if (net_admin) {
struct tcp_md5sig_info *md5sig;
rcu_read_lock();
md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info);
if (md5sig)
err = tcp_diag_put_md5sig(skb, md5sig);
rcu_read_unlock();
if (err < 0)
return err;
}
#endif
if (net_admin) {
const struct tcp_ulp_ops *ulp_ops;
ulp_ops = icsk->icsk_ulp_ops;
if (ulp_ops)
err = tcp_diag_put_ulp(skb, sk, ulp_ops);
if (err)
return err;
}
return 0;
}
static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin)
{
struct inet_connection_sock *icsk = inet_csk(sk);
size_t size = 0;
#ifdef CONFIG_TCP_MD5SIG
if (net_admin && sk_fullsock(sk)) {
const struct tcp_md5sig_info *md5sig;
const struct tcp_md5sig_key *key;
size_t md5sig_count = 0;
rcu_read_lock();
md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info);
if (md5sig) {
hlist_for_each_entry_rcu(key, &md5sig->head, node)
md5sig_count++;
}
rcu_read_unlock();
size += nla_total_size(md5sig_count *
sizeof(struct tcp_diag_md5sig));
}
#endif
if (net_admin && sk_fullsock(sk)) {
const struct tcp_ulp_ops *ulp_ops;
ulp_ops = icsk->icsk_ulp_ops;
if (ulp_ops) {
size += nla_total_size(0) +
nla_total_size(TCP_ULP_NAME_MAX);
if (ulp_ops->get_info_size)
size += ulp_ops->get_info_size(sk);
}
}
return size;
}
static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *r)
{
struct inet_hashinfo *hinfo;
hinfo = sock_net(cb->skb->sk)->ipv4.tcp_death_row.hashinfo;
inet_diag_dump_icsk(hinfo, skb, cb, r);
}
static int tcp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
struct inet_hashinfo *hinfo;
hinfo = sock_net(cb->skb->sk)->ipv4.tcp_death_row.hashinfo;
return inet_diag_dump_one_icsk(hinfo, cb, req);
}
#ifdef CONFIG_INET_DIAG_DESTROY
static int tcp_diag_destroy(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req)
{
struct net *net = sock_net(in_skb->sk);
struct inet_hashinfo *hinfo;
struct sock *sk;
int err;
hinfo = net->ipv4.tcp_death_row.hashinfo;
sk = inet_diag_find_one_icsk(net, hinfo, req);
if (IS_ERR(sk))
return PTR_ERR(sk);
err = sock_diag_destroy(sk, ECONNABORTED);
sock_gen_put(sk);
return err;
}
#endif
static const struct inet_diag_handler tcp_diag_handler = {
.dump = tcp_diag_dump,
.dump_one = tcp_diag_dump_one,
.idiag_get_info = tcp_diag_get_info,
.idiag_get_aux = tcp_diag_get_aux,
.idiag_get_aux_size = tcp_diag_get_aux_size,
.idiag_type = IPPROTO_TCP,
.idiag_info_size = sizeof(struct tcp_info),
#ifdef CONFIG_INET_DIAG_DESTROY
.destroy = tcp_diag_destroy,
#endif
};
static int __init tcp_diag_init(void)
{
return inet_diag_register(&tcp_diag_handler);
}
static void __exit tcp_diag_exit(void)
{
inet_diag_unregister(&tcp_diag_handler);
}
module_init(tcp_diag_init);
module_exit(tcp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-6 /* AF_INET - IPPROTO_TCP */);
| linux-master | net/ipv4/tcp_diag.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xfrm4_policy.c
*
* Changes:
* Kazunori MIYAZAWA @USAGI
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/l3mdev.h>
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
u32 mark)
{
struct rtable *rt;
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
fl4->flowi4_l3mdev = l3mdev_master_ifindex_by_index(net, oif);
fl4->flowi4_mark = mark;
if (saddr)
fl4->saddr = saddr->a4;
rt = __ip_route_output_key(net, fl4);
if (!IS_ERR(rt))
return &rt->dst;
return ERR_CAST(rt);
}
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
u32 mark)
{
struct flowi4 fl4;
return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr, mark);
}
static int xfrm4_get_saddr(struct net *net, int oif,
xfrm_address_t *saddr, xfrm_address_t *daddr,
u32 mark)
{
struct dst_entry *dst;
struct flowi4 fl4;
dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr, mark);
if (IS_ERR(dst))
return -EHOSTUNREACH;
saddr->a4 = fl4.saddr;
dst_release(dst);
return 0;
}
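/* xfrm4_get_saddr() performs a throw-away route lookup towards @daddr
 * only to learn which source address the routing code would prefer;
 * the resulting dst is released right away and just fl4.saddr is kept.
 */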
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
struct rtable *rt = (struct rtable *)xdst->route;
const struct flowi4 *fl4 = &fl->u.ip4;
xdst->u.rt.rt_iif = fl4->flowi4_iif;
xdst->u.dst.dev = dev;
netdev_hold(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
/* This used to be handled correctly; the logic was apparently
 * lost along the way, so this code needs an audit. */
xdst->u.rt.rt_is_input = rt->rt_is_input;
xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
RTCF_LOCAL);
xdst->u.rt.rt_type = rt->rt_type;
xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
xdst->u.rt.rt_gw_family = rt->rt_gw_family;
if (rt->rt_gw_family == AF_INET)
xdst->u.rt.rt_gw4 = rt->rt_gw4;
else if (rt->rt_gw_family == AF_INET6)
xdst->u.rt.rt_gw6 = rt->rt_gw6;
xdst->u.rt.rt_pmtu = rt->rt_pmtu;
xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
rt_add_uncached_list(&xdst->u.rt);
return 0;
}
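/* xfrm4_fill_dst() copies the inner route's metadata (gateway, type,
 * PMTU state, ...) into the bundle's embedded rtable so that users of
 * the xfrm_dst see the same routing properties as the plain route, and
 * puts it on the uncached list so it can be cleaned up when the
 * underlying device goes away.
 */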
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
}
static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
path->ops->redirect(path, sk, skb);
}
static void xfrm4_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
dst_destroy_metrics_generic(dst);
rt_del_uncached_list(&xdst->u.rt);
xfrm_dst_destroy(xdst);
}
static struct dst_ops xfrm4_dst_ops_template = {
.family = AF_INET,
.update_pmtu = xfrm4_update_pmtu,
.redirect = xfrm4_redirect,
.cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm4_dst_destroy,
.ifdown = xfrm_dst_ifdown,
.local_out = __ip_local_out,
.gc_thresh = 32768,
};
static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.dst_ops = &xfrm4_dst_ops_template,
.dst_lookup = xfrm4_dst_lookup,
.get_saddr = xfrm4_get_saddr,
.fill_dst = xfrm4_fill_dst,
.blackhole_route = ipv4_blackhole_route,
};
#ifdef CONFIG_SYSCTL
static struct ctl_table xfrm4_policy_table[] = {
{
.procname = "xfrm4_gc_thresh",
.data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static __net_init int xfrm4_net_sysctl_init(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
table = xfrm4_policy_table;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
if (!table)
goto err_alloc;
table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
}
hdr = register_net_sysctl_sz(net, "net/ipv4", table,
ARRAY_SIZE(xfrm4_policy_table));
if (!hdr)
goto err_reg;
net->ipv4.xfrm4_hdr = hdr;
return 0;
err_reg:
if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
}
static __net_exit void xfrm4_net_sysctl_exit(struct net *net)
{
struct ctl_table *table;
if (!net->ipv4.xfrm4_hdr)
return;
table = net->ipv4.xfrm4_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.xfrm4_hdr);
if (!net_eq(net, &init_net))
kfree(table);
}
#else /* CONFIG_SYSCTL */
static inline int xfrm4_net_sysctl_init(struct net *net)
{
return 0;
}
static inline void xfrm4_net_sysctl_exit(struct net *net)
{
}
#endif
static int __net_init xfrm4_net_init(struct net *net)
{
int ret;
memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template,
sizeof(xfrm4_dst_ops_template));
ret = dst_entries_init(&net->xfrm.xfrm4_dst_ops);
if (ret)
return ret;
ret = xfrm4_net_sysctl_init(net);
if (ret)
dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
return ret;
}
static void __net_exit xfrm4_net_exit(struct net *net)
{
xfrm4_net_sysctl_exit(net);
dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
}
static struct pernet_operations __net_initdata xfrm4_net_ops = {
.init = xfrm4_net_init,
.exit = xfrm4_net_exit,
};
static void __init xfrm4_policy_init(void)
{
xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
}
void __init xfrm4_init(void)
{
xfrm4_state_init();
xfrm4_policy_init();
xfrm4_protocol_init();
register_pernet_subsys(&xfrm4_net_ops);
}
| linux-master | net/ipv4/xfrm4_policy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem.
*
* Begun April 1, 1996, Mike Shaver.
* Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS]
*/
#include <linux/sysctl.h>
#include <linux/seqlock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/cipso_ipv4.h>
#include <net/ping.h>
#include <net/protocol.h>
#include <net/netevent.h>
static int tcp_retr1_max = 255;
static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
static int tcp_app_win_max = 31;
static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
static int tcp_min_snd_mss_max = 65535;
static int ip_privileged_port_min;
static int ip_privileged_port_max = 65535;
static int ip_ttl_min = 1;
static int ip_ttl_max = 255;
static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int tcp_syn_linear_timeouts_max = MAX_TCP_SYNCNT;
static unsigned long ip_ping_group_range_min[] = { 0, 0 };
static unsigned long ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
static u32 u32_max_div_HZ = UINT_MAX / HZ;
static int one_day_secs = 24 * 3600;
static u32 fib_multipath_hash_fields_all_mask __maybe_unused =
FIB_MULTIPATH_HASH_FIELD_ALL_MASK;
static unsigned int tcp_child_ehash_entries_max = 16 * 1024 * 1024;
static unsigned int udp_child_hash_entries_max = UDP_HTABLE_SIZE_MAX;
static int tcp_plb_max_rounds = 31;
static int tcp_plb_max_cong_thresh = 256;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
{
bool same_parity = !((range[0] ^ range[1]) & 1);
write_seqlock_bh(&net->ipv4.ip_local_ports.lock);
if (same_parity && !net->ipv4.ip_local_ports.warned) {
net->ipv4.ip_local_ports.warned = true;
pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n");
}
net->ipv4.ip_local_ports.range[0] = range[0];
net->ipv4.ip_local_ports.range[1] = range[1];
write_sequnlock_bh(&net->ipv4.ip_local_ports.lock);
}
/* Validate changes from /proc interface. */
static int ipv4_local_port_range(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net =
container_of(table->data, struct net, ipv4.ip_local_ports.range);
int ret;
int range[2];
struct ctl_table tmp = {
.data = &range,
.maxlen = sizeof(range),
.mode = table->mode,
.extra1 = &ip_local_port_range_min,
.extra2 = &ip_local_port_range_max,
};
inet_get_local_port_range(net, &range[0], &range[1]);
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0) {
/* Ensure that the upper limit is not smaller than the lower,
* and that the lower does not encroach upon the privileged
* port limit.
*/
if ((range[1] < range[0]) ||
(range[0] < READ_ONCE(net->ipv4.sysctl_ip_prot_sock)))
ret = -EINVAL;
else
set_local_port_range(net, range);
}
return ret;
}
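/* The handler above rejects writes where the upper bound is below the
 * lower bound or where the range dips into the privileged port space,
 * e.g. (illustrative values only):
 *
 *   sysctl -w net.ipv4.ip_local_port_range="32768 60999"   # accepted
 *   sysctl -w net.ipv4.ip_local_port_range="60999 32768"   # rejected, EINVAL
 */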
/* Validate changes from /proc interface. */
static int ipv4_privileged_ports(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
ipv4.sysctl_ip_prot_sock);
int ret;
int pports;
int range[2];
struct ctl_table tmp = {
.data = &pports,
.maxlen = sizeof(pports),
.mode = table->mode,
.extra1 = &ip_privileged_port_min,
.extra2 = &ip_privileged_port_max,
};
pports = READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0) {
inet_get_local_port_range(net, &range[0], &range[1]);
/* Ensure that the local port range doesn't overlap with the
* privileged port range.
*/
if (range[0] < pports)
ret = -EINVAL;
else
WRITE_ONCE(net->ipv4.sysctl_ip_prot_sock, pports);
}
return ret;
}
static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
{
kgid_t *data = table->data;
struct net *net =
container_of(table->data, struct net, ipv4.ping_group_range.range);
unsigned int seq;
do {
seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
*low = data[0];
*high = data[1];
} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
}
/* Update system visible IP port range */
static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
{
kgid_t *data = table->data;
struct net *net =
container_of(table->data, struct net, ipv4.ping_group_range.range);
write_seqlock(&net->ipv4.ping_group_range.lock);
data[0] = low;
data[1] = high;
write_sequnlock(&net->ipv4.ping_group_range.lock);
}
/* Validate changes from /proc interface. */
static int ipv4_ping_group_range(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct user_namespace *user_ns = current_user_ns();
int ret;
unsigned long urange[2];
kgid_t low, high;
struct ctl_table tmp = {
.data = &urange,
.maxlen = sizeof(urange),
.mode = table->mode,
.extra1 = &ip_ping_group_range_min,
.extra2 = &ip_ping_group_range_max,
};
inet_get_ping_group_range_table(table, &low, &high);
urange[0] = from_kgid_munged(user_ns, low);
urange[1] = from_kgid_munged(user_ns, high);
ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0) {
low = make_kgid(user_ns, urange[0]);
high = make_kgid(user_ns, urange[1]);
if (!gid_valid(low) || !gid_valid(high))
return -EINVAL;
if (urange[1] < urange[0] || gid_lt(high, low)) {
low = make_kgid(&init_user_ns, 1);
high = make_kgid(&init_user_ns, 0);
}
set_ping_group_range(table, low, high);
}
return ret;
}
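/* Note that an inverted gid range (high < low) written here is not
 * rejected; the handler above silently falls back to the (1, 0)
 * "nobody" default, which disables unprivileged ping sockets again.
 */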
static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
int ret;
net = container_of(table->data, struct net,
ipv4.sysctl_ip_fwd_update_priority);
ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
if (write && ret == 0)
call_netevent_notifiers(NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE,
net);
return ret;
}
static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(ctl->data, struct net,
ipv4.tcp_congestion_control);
char val[TCP_CA_NAME_MAX];
struct ctl_table tbl = {
.data = val,
.maxlen = TCP_CA_NAME_MAX,
};
int ret;
tcp_get_default_congestion_control(net, val);
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
if (write && ret == 0)
ret = tcp_set_default_congestion_control(net, val);
return ret;
}
static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
int ret;
tbl.data = kmalloc(tbl.maxlen, GFP_USER);
if (!tbl.data)
return -ENOMEM;
tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX);
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
kfree(tbl.data);
return ret;
}
static int proc_allowed_congestion_control(struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
int ret;
tbl.data = kmalloc(tbl.maxlen, GFP_USER);
if (!tbl.data)
return -ENOMEM;
tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen);
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
if (write && ret == 0)
ret = tcp_set_allowed_congestion_control(tbl.data);
kfree(tbl.data);
return ret;
}
static int sscanf_key(char *buf, __le32 *key)
{
u32 user_key[4];
int i, ret = 0;
if (sscanf(buf, "%x-%x-%x-%x", user_key, user_key + 1,
user_key + 2, user_key + 3) != 4) {
ret = -EINVAL;
} else {
for (i = 0; i < ARRAY_SIZE(user_key); i++)
key[i] = cpu_to_le32(user_key[i]);
}
pr_debug("proc TFO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
user_key[0], user_key[1], user_key[2], user_key[3], buf, ret);
return ret;
}
static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
ipv4.sysctl_tcp_fastopen);
/* maxlen to print the list of keys in hex (*2), with dashes
* separating doublewords and a comma in between keys.
*/
struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
2 * TCP_FASTOPEN_KEY_MAX) +
(TCP_FASTOPEN_KEY_MAX * 5)) };
u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)];
__le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)];
char *backup_data;
int ret, i = 0, off = 0, n_keys;
tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
if (!tbl.data)
return -ENOMEM;
n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key);
if (!n_keys) {
memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH);
n_keys = 1;
}
for (i = 0; i < n_keys * 4; i++)
user_key[i] = le32_to_cpu(key[i]);
for (i = 0; i < n_keys; i++) {
off += snprintf(tbl.data + off, tbl.maxlen - off,
"%08x-%08x-%08x-%08x",
user_key[i * 4],
user_key[i * 4 + 1],
user_key[i * 4 + 2],
user_key[i * 4 + 3]);
if (WARN_ON_ONCE(off >= tbl.maxlen - 1))
break;
if (i + 1 < n_keys)
off += snprintf(tbl.data + off, tbl.maxlen - off, ",");
}
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
if (write && ret == 0) {
backup_data = strchr(tbl.data, ',');
if (backup_data) {
*backup_data = '\0';
backup_data++;
}
if (sscanf_key(tbl.data, key)) {
ret = -EINVAL;
goto bad_key;
}
if (backup_data) {
if (sscanf_key(backup_data, key + 4)) {
ret = -EINVAL;
goto bad_key;
}
}
tcp_fastopen_reset_cipher(net, NULL, key,
backup_data ? key + 4 : NULL);
}
bad_key:
kfree(tbl.data);
return ret;
}
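/* The text format parsed above is one primary key, optionally followed
 * by a comma and a backup key, each written as four dash-separated
 * 32-bit hex words, e.g. (arbitrary example values):
 *
 *   00112233-44556677-8899aabb-ccddeeff,fedcba98-76543210-00000000-ffffffff
 */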
static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
ipv4.sysctl_tcp_fastopen_blackhole_timeout);
int ret;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (write && ret == 0)
atomic_set(&net->ipv4.tfo_active_disable_times, 0);
return ret;
}
static int proc_tcp_available_ulp(struct ctl_table *ctl,
int write, void *buffer, size_t *lenp,
loff_t *ppos)
{
struct ctl_table tbl = { .maxlen = TCP_ULP_BUF_MAX, };
int ret;
tbl.data = kmalloc(tbl.maxlen, GFP_USER);
if (!tbl.data)
return -ENOMEM;
tcp_get_available_ulp(tbl.data, TCP_ULP_BUF_MAX);
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
kfree(tbl.data);
return ret;
}
static int proc_tcp_ehash_entries(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
ipv4.sysctl_tcp_child_ehash_entries);
struct inet_hashinfo *hinfo = net->ipv4.tcp_death_row.hashinfo;
int tcp_ehash_entries;
struct ctl_table tbl;
tcp_ehash_entries = hinfo->ehash_mask + 1;
/* A negative number indicates that the child netns
* shares the global ehash.
*/
if (!net_eq(net, &init_net) && !hinfo->pernet)
tcp_ehash_entries *= -1;
memset(&tbl, 0, sizeof(tbl));
tbl.data = &tcp_ehash_entries;
tbl.maxlen = sizeof(int);
return proc_dointvec(&tbl, write, buffer, lenp, ppos);
}
static int proc_udp_hash_entries(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
ipv4.sysctl_udp_child_hash_entries);
int udp_hash_entries;
struct ctl_table tbl;
udp_hash_entries = net->ipv4.udp_table->mask + 1;
/* A negative number indicates that the child netns
* shares the global udp_table.
*/
if (!net_eq(net, &init_net) && net->ipv4.udp_table == &udp_table)
udp_hash_entries *= -1;
memset(&tbl, 0, sizeof(tbl));
tbl.data = &udp_hash_entries;
tbl.maxlen = sizeof(int);
return proc_dointvec(&tbl, write, buffer, lenp, ppos);
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
ipv4.sysctl_fib_multipath_hash_policy);
int ret;
ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
if (write && ret == 0)
call_netevent_notifiers(NETEVENT_IPV4_MPATH_HASH_UPDATE, net);
return ret;
}
static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
struct net *net;
int ret;
net = container_of(table->data, struct net,
ipv4.sysctl_fib_multipath_hash_fields);
ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
if (write && ret == 0)
call_netevent_notifiers(NETEVENT_IPV4_MPATH_HASH_UPDATE, net);
return ret;
}
#endif
static struct ctl_table ipv4_table[] = {
{
.procname = "tcp_max_orphans",
.data = &sysctl_tcp_max_orphans,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "inet_peer_threshold",
.data = &inet_peer_threshold,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "inet_peer_minttl",
.data = &inet_peer_minttl,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "inet_peer_maxttl",
.data = &inet_peer_maxttl,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "tcp_mem",
.maxlen = sizeof(sysctl_tcp_mem),
.data = &sysctl_tcp_mem,
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "tcp_low_latency",
.data = &sysctl_tcp_low_latency,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
#ifdef CONFIG_NETLABEL
{
.procname = "cipso_cache_enable",
.data = &cipso_v4_cache_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "cipso_cache_bucket_size",
.data = &cipso_v4_cache_bucketsize,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "cipso_rbm_optfmt",
.data = &cipso_v4_rbm_optfmt,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "cipso_rbm_strictvalid",
.data = &cipso_v4_rbm_strictvalid,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif /* CONFIG_NETLABEL */
{
.procname = "tcp_available_ulp",
.maxlen = TCP_ULP_BUF_MAX,
.mode = 0444,
.proc_handler = proc_tcp_available_ulp,
},
{
.procname = "icmp_msgs_per_sec",
.data = &sysctl_icmp_msgs_per_sec,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
{
.procname = "icmp_msgs_burst",
.data = &sysctl_icmp_msgs_burst,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
{
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "fib_sync_mem",
.data = &sysctl_fib_sync_mem,
.maxlen = sizeof(sysctl_fib_sync_mem),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = &sysctl_fib_sync_mem_min,
.extra2 = &sysctl_fib_sync_mem_max,
},
{ }
};
static struct ctl_table ipv4_net_table[] = {
{
.procname = "tcp_max_tw_buckets",
.data = &init_net.ipv4.tcp_death_row.sysctl_max_tw_buckets,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "icmp_echo_ignore_all",
.data = &init_net.ipv4.sysctl_icmp_echo_ignore_all,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE
},
{
.procname = "icmp_echo_enable_probe",
.data = &init_net.ipv4.sysctl_icmp_echo_enable_probe,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE
},
{
.procname = "icmp_echo_ignore_broadcasts",
.data = &init_net.ipv4.sysctl_icmp_echo_ignore_broadcasts,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE
},
{
.procname = "icmp_ignore_bogus_error_responses",
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE
},
{
.procname = "icmp_errors_use_inbound_ifaddr",
.data = &init_net.ipv4.sysctl_icmp_errors_use_inbound_ifaddr,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE
},
{
.procname = "icmp_ratelimit",
.data = &init_net.ipv4.sysctl_icmp_ratelimit,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
{
.procname = "icmp_ratemask",
.data = &init_net.ipv4.sysctl_icmp_ratemask,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "ping_group_range",
.data = &init_net.ipv4.ping_group_range.range,
.maxlen = sizeof(gid_t)*2,
.mode = 0644,
.proc_handler = ipv4_ping_group_range,
},
#ifdef CONFIG_NET_L3_MASTER_DEV
{
.procname = "raw_l3mdev_accept",
.data = &init_net.ipv4.sysctl_raw_l3mdev_accept,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif
{
.procname = "tcp_ecn",
.data = &init_net.ipv4.sysctl_tcp_ecn,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
{
.procname = "tcp_ecn_fallback",
.data = &init_net.ipv4.sysctl_tcp_ecn_fallback,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "ip_dynaddr",
.data = &init_net.ipv4.sysctl_ip_dynaddr,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "ip_early_demux",
.data = &init_net.ipv4.sysctl_ip_early_demux,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "udp_early_demux",
.data = &init_net.ipv4.sysctl_udp_early_demux,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_early_demux",
.data = &init_net.ipv4.sysctl_tcp_early_demux,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "nexthop_compat_mode",
.data = &init_net.ipv4.sysctl_nexthop_compat_mode,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "ip_default_ttl",
.data = &init_net.ipv4.sysctl_ip_default_ttl,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = &ip_ttl_min,
.extra2 = &ip_ttl_max,
},
{
.procname = "ip_local_port_range",
.maxlen = sizeof(init_net.ipv4.ip_local_ports.range),
.data = &init_net.ipv4.ip_local_ports.range,
.mode = 0644,
.proc_handler = ipv4_local_port_range,
},
{
.procname = "ip_local_reserved_ports",
.data = &init_net.ipv4.sysctl_local_reserved_ports,
.maxlen = 65536,
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
{
.procname = "ip_no_pmtu_disc",
.data = &init_net.ipv4.sysctl_ip_no_pmtu_disc,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "ip_forward_use_pmtu",
.data = &init_net.ipv4.sysctl_ip_fwd_use_pmtu,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "ip_forward_update_priority",
.data = &init_net.ipv4.sysctl_ip_fwd_update_priority,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = ipv4_fwd_update_priority,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "ip_nonlocal_bind",
.data = &init_net.ipv4.sysctl_ip_nonlocal_bind,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "ip_autobind_reuse",
.data = &init_net.ipv4.sysctl_ip_autobind_reuse,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "fwmark_reflect",
.data = &init_net.ipv4.sysctl_fwmark_reflect,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_fwmark_accept",
.data = &init_net.ipv4.sysctl_tcp_fwmark_accept,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
#ifdef CONFIG_NET_L3_MASTER_DEV
{
.procname = "tcp_l3mdev_accept",
.data = &init_net.ipv4.sysctl_tcp_l3mdev_accept,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif
{
.procname = "tcp_mtu_probing",
.data = &init_net.ipv4.sysctl_tcp_mtu_probing,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_base_mss",
.data = &init_net.ipv4.sysctl_tcp_base_mss,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "tcp_min_snd_mss",
.data = &init_net.ipv4.sysctl_tcp_min_snd_mss,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &tcp_min_snd_mss_min,
.extra2 = &tcp_min_snd_mss_max,
},
{
.procname = "tcp_mtu_probe_floor",
.data = &init_net.ipv4.sysctl_tcp_mtu_probe_floor,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &tcp_min_snd_mss_min,
.extra2 = &tcp_min_snd_mss_max,
},
{
.procname = "tcp_probe_threshold",
.data = &init_net.ipv4.sysctl_tcp_probe_threshold,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "tcp_probe_interval",
.data = &init_net.ipv4.sysctl_tcp_probe_interval,
.maxlen = sizeof(u32),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra2 = &u32_max_div_HZ,
},
{
.procname = "igmp_link_local_mcast_reports",
.data = &init_net.ipv4.sysctl_igmp_llm_reports,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "igmp_max_memberships",
.data = &init_net.ipv4.sysctl_igmp_max_memberships,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "igmp_max_msf",
.data = &init_net.ipv4.sysctl_igmp_max_msf,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
#ifdef CONFIG_IP_MULTICAST
{
.procname = "igmp_qrv",
.data = &init_net.ipv4.sysctl_igmp_qrv,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE
},
#endif
{
.procname = "tcp_congestion_control",
.data = &init_net.ipv4.tcp_congestion_control,
.mode = 0644,
.maxlen = TCP_CA_NAME_MAX,
.proc_handler = proc_tcp_congestion_control,
},
{
.procname = "tcp_available_congestion_control",
.maxlen = TCP_CA_BUF_MAX,
.mode = 0444,
.proc_handler = proc_tcp_available_congestion_control,
},
{
.procname = "tcp_allowed_congestion_control",
.maxlen = TCP_CA_BUF_MAX,
.mode = 0644,
.proc_handler = proc_allowed_congestion_control,
},
{
.procname = "tcp_keepalive_time",
.data = &init_net.ipv4.sysctl_tcp_keepalive_time,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "tcp_keepalive_probes",
.data = &init_net.ipv4.sysctl_tcp_keepalive_probes,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_keepalive_intvl",
.data = &init_net.ipv4.sysctl_tcp_keepalive_intvl,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "tcp_syn_retries",
.data = &init_net.ipv4.sysctl_tcp_syn_retries,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = &tcp_syn_retries_min,
.extra2 = &tcp_syn_retries_max
},
{
.procname = "tcp_synack_retries",
.data = &init_net.ipv4.sysctl_tcp_synack_retries,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
#ifdef CONFIG_SYN_COOKIES
{
.procname = "tcp_syncookies",
.data = &init_net.ipv4.sysctl_tcp_syncookies,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
#endif
{
.procname = "tcp_migrate_req",
.data = &init_net.ipv4.sysctl_tcp_migrate_req,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE
},
{
.procname = "tcp_reordering",
.data = &init_net.ipv4.sysctl_tcp_reordering,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "tcp_retries1",
.data = &init_net.ipv4.sysctl_tcp_retries1,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra2 = &tcp_retr1_max
},
{
.procname = "tcp_retries2",
.data = &init_net.ipv4.sysctl_tcp_retries2,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_orphan_retries",
.data = &init_net.ipv4.sysctl_tcp_orphan_retries,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_fin_timeout",
.data = &init_net.ipv4.sysctl_tcp_fin_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "tcp_notsent_lowat",
.data = &init_net.ipv4.sysctl_tcp_notsent_lowat,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec,
},
{
.procname = "tcp_tw_reuse",
.data = &init_net.ipv4.sysctl_tcp_tw_reuse,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
{
.procname = "tcp_max_syn_backlog",
.data = &init_net.ipv4.sysctl_max_syn_backlog,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "tcp_fastopen",
.data = &init_net.ipv4.sysctl_tcp_fastopen,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "tcp_fastopen_key",
.mode = 0600,
.data = &init_net.ipv4.sysctl_tcp_fastopen,
/* maxlen to print the list of keys in hex (*2), with dashes
* separating doublewords and a comma in between keys.
*/
.maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
2 * TCP_FASTOPEN_KEY_MAX) +
(TCP_FASTOPEN_KEY_MAX * 5)),
.proc_handler = proc_tcp_fastopen_key,
},
{
.procname = "tcp_fastopen_blackhole_timeout_sec",
.data = &init_net.ipv4.sysctl_tcp_fastopen_blackhole_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_tfo_blackhole_detect_timeout,
.extra1 = SYSCTL_ZERO,
},
#ifdef CONFIG_IP_ROUTE_MULTIPATH
{
.procname = "fib_multipath_use_neigh",
.data = &init_net.ipv4.sysctl_fib_multipath_use_neigh,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "fib_multipath_hash_policy",
.data = &init_net.ipv4.sysctl_fib_multipath_hash_policy,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_fib_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_THREE,
},
{
.procname = "fib_multipath_hash_fields",
.data = &init_net.ipv4.sysctl_fib_multipath_hash_fields,
.maxlen = sizeof(u32),
.mode = 0644,
.proc_handler = proc_fib_multipath_hash_fields,
.extra1 = SYSCTL_ONE,
.extra2 = &fib_multipath_hash_fields_all_mask,
},
#endif
{
.procname = "ip_unprivileged_port_start",
.maxlen = sizeof(int),
.data = &init_net.ipv4.sysctl_ip_prot_sock,
.mode = 0644,
.proc_handler = ipv4_privileged_ports,
},
#ifdef CONFIG_NET_L3_MASTER_DEV
{
.procname = "udp_l3mdev_accept",
.data = &init_net.ipv4.sysctl_udp_l3mdev_accept,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif
{
.procname = "tcp_sack",
.data = &init_net.ipv4.sysctl_tcp_sack,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_window_scaling",
.data = &init_net.ipv4.sysctl_tcp_window_scaling,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_timestamps",
.data = &init_net.ipv4.sysctl_tcp_timestamps,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_early_retrans",
.data = &init_net.ipv4.sysctl_tcp_early_retrans,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_FOUR,
},
{
.procname = "tcp_recovery",
.data = &init_net.ipv4.sysctl_tcp_recovery,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_thin_linear_timeouts",
.data = &init_net.ipv4.sysctl_tcp_thin_linear_timeouts,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_slow_start_after_idle",
.data = &init_net.ipv4.sysctl_tcp_slow_start_after_idle,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_retrans_collapse",
.data = &init_net.ipv4.sysctl_tcp_retrans_collapse,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_stdurg",
.data = &init_net.ipv4.sysctl_tcp_stdurg,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_rfc1337",
.data = &init_net.ipv4.sysctl_tcp_rfc1337,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_abort_on_overflow",
.data = &init_net.ipv4.sysctl_tcp_abort_on_overflow,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_fack",
.data = &init_net.ipv4.sysctl_tcp_fack,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_max_reordering",
.data = &init_net.ipv4.sysctl_tcp_max_reordering,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "tcp_dsack",
.data = &init_net.ipv4.sysctl_tcp_dsack,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_app_win",
.data = &init_net.ipv4.sysctl_tcp_app_win,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &tcp_app_win_max,
},
{
.procname = "tcp_adv_win_scale",
.data = &init_net.ipv4.sysctl_tcp_adv_win_scale,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &tcp_adv_win_scale_min,
.extra2 = &tcp_adv_win_scale_max,
},
{
.procname = "tcp_frto",
.data = &init_net.ipv4.sysctl_tcp_frto,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_no_metrics_save",
.data = &init_net.ipv4.sysctl_tcp_nometrics_save,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_no_ssthresh_metrics_save",
.data = &init_net.ipv4.sysctl_tcp_no_ssthresh_metrics_save,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "tcp_moderate_rcvbuf",
.data = &init_net.ipv4.sysctl_tcp_moderate_rcvbuf,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_tso_win_divisor",
.data = &init_net.ipv4.sysctl_tcp_tso_win_divisor,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_workaround_signed_windows",
.data = &init_net.ipv4.sysctl_tcp_workaround_signed_windows,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_limit_output_bytes",
.data = &init_net.ipv4.sysctl_tcp_limit_output_bytes,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "tcp_challenge_ack_limit",
.data = &init_net.ipv4.sysctl_tcp_challenge_ack_limit,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "tcp_min_tso_segs",
.data = &init_net.ipv4.sysctl_tcp_min_tso_segs,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ONE,
},
{
.procname = "tcp_tso_rtt_log",
.data = &init_net.ipv4.sysctl_tcp_tso_rtt_log,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_min_rtt_wlen",
.data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &one_day_secs
},
{
.procname = "tcp_autocorking",
.data = &init_net.ipv4.sysctl_tcp_autocorking,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "tcp_invalid_ratelimit",
.data = &init_net.ipv4.sysctl_tcp_invalid_ratelimit,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
{
.procname = "tcp_pacing_ss_ratio",
.data = &init_net.ipv4.sysctl_tcp_pacing_ss_ratio,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE_THOUSAND,
},
{
.procname = "tcp_pacing_ca_ratio",
.data = &init_net.ipv4.sysctl_tcp_pacing_ca_ratio,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE_THOUSAND,
},
{
.procname = "tcp_wmem",
.data = &init_net.ipv4.sysctl_tcp_wmem,
.maxlen = sizeof(init_net.ipv4.sysctl_tcp_wmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
},
{
.procname = "tcp_rmem",
.data = &init_net.ipv4.sysctl_tcp_rmem,
.maxlen = sizeof(init_net.ipv4.sysctl_tcp_rmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
},
{
.procname = "tcp_comp_sack_delay_ns",
.data = &init_net.ipv4.sysctl_tcp_comp_sack_delay_ns,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "tcp_comp_sack_slack_ns",
.data = &init_net.ipv4.sysctl_tcp_comp_sack_slack_ns,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "tcp_comp_sack_nr",
.data = &init_net.ipv4.sysctl_tcp_comp_sack_nr,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
},
{
.procname = "tcp_reflect_tos",
.data = &init_net.ipv4.sysctl_tcp_reflect_tos,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "tcp_ehash_entries",
.data = &init_net.ipv4.sysctl_tcp_child_ehash_entries,
.mode = 0444,
.proc_handler = proc_tcp_ehash_entries,
},
{
.procname = "tcp_child_ehash_entries",
.data = &init_net.ipv4.sysctl_tcp_child_ehash_entries,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &tcp_child_ehash_entries_max,
},
{
.procname = "udp_hash_entries",
.data = &init_net.ipv4.sysctl_udp_child_hash_entries,
.mode = 0444,
.proc_handler = proc_udp_hash_entries,
},
{
.procname = "udp_child_hash_entries",
.data = &init_net.ipv4.sysctl_udp_child_hash_entries,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &udp_child_hash_entries_max,
},
{
.procname = "udp_rmem_min",
.data = &init_net.ipv4.sysctl_udp_rmem_min,
.maxlen = sizeof(init_net.ipv4.sysctl_udp_rmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE
},
{
.procname = "udp_wmem_min",
.data = &init_net.ipv4.sysctl_udp_wmem_min,
.maxlen = sizeof(init_net.ipv4.sysctl_udp_wmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE
},
{
.procname = "fib_notify_on_flag_change",
.data = &init_net.ipv4.sysctl_fib_notify_on_flag_change,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
{
.procname = "tcp_plb_enabled",
.data = &init_net.ipv4.sysctl_tcp_plb_enabled,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "tcp_plb_idle_rehash_rounds",
.data = &init_net.ipv4.sysctl_tcp_plb_idle_rehash_rounds,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra2 = &tcp_plb_max_rounds,
},
{
.procname = "tcp_plb_rehash_rounds",
.data = &init_net.ipv4.sysctl_tcp_plb_rehash_rounds,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra2 = &tcp_plb_max_rounds,
},
{
.procname = "tcp_plb_suspend_rto_sec",
.data = &init_net.ipv4.sysctl_tcp_plb_suspend_rto_sec,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
{
.procname = "tcp_plb_cong_thresh",
.data = &init_net.ipv4.sysctl_tcp_plb_cong_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &tcp_plb_max_cong_thresh,
},
{
.procname = "tcp_syn_linear_timeouts",
.data = &init_net.ipv4.sysctl_tcp_syn_linear_timeouts,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &tcp_syn_linear_timeouts_max,
},
{
.procname = "tcp_shrink_window",
.data = &init_net.ipv4.sysctl_tcp_shrink_window,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{ }
};
static __net_init int ipv4_sysctl_init_net(struct net *net)
{
struct ctl_table *table;
table = ipv4_net_table;
if (!net_eq(net, &init_net)) {
int i;
table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
if (!table)
goto err_alloc;
for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++) {
if (table[i].data) {
/* Update the variables to point into
* the current struct net
*/
table[i].data += (void *)net - (void *)&init_net;
} else {
/* Entries without data pointer are global;
* Make them read-only in non-init_net ns
*/
table[i].mode &= ~0222;
}
}
}
net->ipv4.ipv4_hdr = register_net_sysctl_sz(net, "net/ipv4", table,
ARRAY_SIZE(ipv4_net_table));
if (!net->ipv4.ipv4_hdr)
goto err_reg;
net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
if (!net->ipv4.sysctl_local_reserved_ports)
goto err_ports;
return 0;
err_ports:
unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
err_reg:
if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
}
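/* The per-netns registration above relies on pointer rebasing: every
 * ->data pointer in the template table points into init_net, so adding
 * the byte offset between the new netns and init_net
 * ((void *)net - (void *)&init_net) makes the duplicated entry point at
 * the corresponding field of the new netns. Entries with a NULL ->data
 * are truly global and are therefore made read-only outside init_net.
 */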
static __net_exit void ipv4_sysctl_exit_net(struct net *net)
{
struct ctl_table *table;
kfree(net->ipv4.sysctl_local_reserved_ports);
table = net->ipv4.ipv4_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
kfree(table);
}
static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
.init = ipv4_sysctl_init_net,
.exit = ipv4_sysctl_exit_net,
};
static __init int sysctl_ipv4_init(void)
{
struct ctl_table_header *hdr;
hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
if (!hdr)
return -ENOMEM;
if (register_pernet_subsys(&ipv4_sysctl_ops)) {
unregister_net_sysctl_table(hdr);
return -ENOMEM;
}
return 0;
}
__initcall(sysctl_ipv4_init);
| linux-master | net/ipv4/sysctl_net_ipv4.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CIPSO - Commercial IP Security Option
*
* This is an implementation of the CIPSO 2.2 protocol as specified in
* draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in
* FIPS-188. While CIPSO never became a full IETF RFC standard many vendors
* have chosen to adopt the protocol and over the years it has become a
* de-facto standard for labeled networking.
*
* The CIPSO draft specification can be found in the kernel's Documentation
* directory as well as the following URL:
* https://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt
* The FIPS-188 specification can be found at the following URL:
* https://www.itl.nist.gov/fipspubs/fip188.htm
*
* Author: Paul Moore <[email protected]>
*/
/*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/jhash.h>
#include <linux/audit.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <asm/unaligned.h>
/* List of available DOI definitions */
/* XXX - This currently assumes a minimal number of different DOIs in use;
 * if in practice there are a lot of different DOIs, this list should
* probably be turned into a hash table or something similar so we
* can do quick lookups. */
static DEFINE_SPINLOCK(cipso_v4_doi_list_lock);
static LIST_HEAD(cipso_v4_doi_list);
/* Label mapping cache */
int cipso_v4_cache_enabled = 1;
int cipso_v4_cache_bucketsize = 10;
#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS)
#define CIPSO_V4_CACHE_REORDERLIMIT 10
struct cipso_v4_map_cache_bkt {
spinlock_t lock;
u32 size;
struct list_head list;
};
struct cipso_v4_map_cache_entry {
u32 hash;
unsigned char *key;
size_t key_len;
struct netlbl_lsm_cache *lsm_data;
u32 activity;
struct list_head list;
};
static struct cipso_v4_map_cache_bkt *cipso_v4_cache;
/* Restricted bitmap (tag #1) flags */
int cipso_v4_rbm_optfmt;
int cipso_v4_rbm_strictvalid = 1;
/*
* Protocol Constants
*/
/* Maximum size of the CIPSO IP option, derived from the fact that the maximum
* IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */
#define CIPSO_V4_OPT_LEN_MAX 40
/* Length of the base CIPSO option, this includes the option type (1 byte), the
* option length (1 byte), and the DOI (4 bytes). */
#define CIPSO_V4_HDR_LEN 6
/* Base length of the restrictive category bitmap tag (tag #1). */
#define CIPSO_V4_TAG_RBM_BLEN 4
/* Base length of the enumerated category tag (tag #2). */
#define CIPSO_V4_TAG_ENUM_BLEN 4
/* Base length of the ranged categories bitmap tag (tag #5). */
#define CIPSO_V4_TAG_RNG_BLEN 4
/* The maximum number of category ranges permitted in the ranged category tag
* (tag #5). You may note that the IETF draft states that the maximum number
* of category ranges is 7, but if the low end of the last category range is
* zero then it is possible to fit 8 category ranges because the zero should
* be omitted. */
#define CIPSO_V4_TAG_RNG_CAT_MAX 8
/* Base length of the local tag (non-standard tag).
* Tag definition (may change between kernel versions)
*
 * 0          8          16         24         32
 * +----------+----------+----------+----------+
 * | 10000000 | 00000110 |  32-bit secid value  |
* +----------+----------+----------+----------+
* | in (host byte order)|
* +----------+----------+
*
*/
#define CIPSO_V4_TAG_LOC_BLEN 6
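/* The local tag carries a raw LSM secid in host byte order, so it is
 * only meaningful on the host that generated it; it is intended for
 * labeling local/loopback traffic rather than packets sent on the wire.
 */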
/*
* Helper Functions
*/
/**
* cipso_v4_cache_entry_free - Frees a cache entry
* @entry: the entry to free
*
* Description:
* This function frees the memory associated with a cache entry including the
* LSM cache data if there are no longer any users, i.e. reference count == 0.
*
*/
static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
{
if (entry->lsm_data)
netlbl_secattr_cache_free(entry->lsm_data);
kfree(entry->key);
kfree(entry);
}
/**
* cipso_v4_map_cache_hash - Hashing function for the CIPSO cache
* @key: the hash key
* @key_len: the length of the key in bytes
*
* Description:
* The CIPSO tag hashing function. Returns a 32-bit hash value.
*
*/
static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len)
{
return jhash(key, key_len, 0);
}
/*
* Label Mapping Cache Functions
*/
/**
* cipso_v4_cache_init - Initialize the CIPSO cache
*
* Description:
* Initializes the CIPSO label mapping cache, this function should be called
* before any of the other functions defined in this file. Returns zero on
* success, negative values on error.
*
*/
static int __init cipso_v4_cache_init(void)
{
u32 iter;
cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
sizeof(struct cipso_v4_map_cache_bkt),
GFP_KERNEL);
if (!cipso_v4_cache)
return -ENOMEM;
for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
spin_lock_init(&cipso_v4_cache[iter].lock);
cipso_v4_cache[iter].size = 0;
INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
}
return 0;
}
/**
* cipso_v4_cache_invalidate - Invalidates the current CIPSO cache
*
* Description:
* Invalidates and frees any entries in the CIPSO cache.
*
*/
void cipso_v4_cache_invalidate(void)
{
struct cipso_v4_map_cache_entry *entry, *tmp_entry;
u32 iter;
for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
spin_lock_bh(&cipso_v4_cache[iter].lock);
list_for_each_entry_safe(entry,
tmp_entry,
&cipso_v4_cache[iter].list, list) {
list_del(&entry->list);
cipso_v4_cache_entry_free(entry);
}
cipso_v4_cache[iter].size = 0;
spin_unlock_bh(&cipso_v4_cache[iter].lock);
}
}
/**
* cipso_v4_cache_check - Check the CIPSO cache for a label mapping
* @key: the buffer to check
* @key_len: buffer length in bytes
* @secattr: the security attribute struct to use
*
* Description:
* This function checks the cache to see if a label mapping already exists for
* the given key. If there is a match then the cache is adjusted and the
* @secattr struct is populated with the correct LSM security attributes. The
* cache is adjusted in the following manner if the entry is not already the
* first in the cache bucket:
*
* 1. The cache entry's activity counter is incremented
* 2. The previous (higher ranking) entry's activity counter is decremented
 * 3. If the difference between the two activity counters is greater than
* CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped
*
* Returns zero on success, -ENOENT for a cache miss, and other negative values
* on error.
*
*/
static int cipso_v4_cache_check(const unsigned char *key,
u32 key_len,
struct netlbl_lsm_secattr *secattr)
{
u32 bkt;
struct cipso_v4_map_cache_entry *entry;
struct cipso_v4_map_cache_entry *prev_entry = NULL;
u32 hash;
if (!READ_ONCE(cipso_v4_cache_enabled))
return -ENOENT;
hash = cipso_v4_map_cache_hash(key, key_len);
bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
spin_lock_bh(&cipso_v4_cache[bkt].lock);
list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
if (entry->hash == hash &&
entry->key_len == key_len &&
memcmp(entry->key, key, key_len) == 0) {
entry->activity += 1;
refcount_inc(&entry->lsm_data->refcount);
secattr->cache = entry->lsm_data;
secattr->flags |= NETLBL_SECATTR_CACHE;
secattr->type = NETLBL_NLTYPE_CIPSOV4;
if (!prev_entry) {
spin_unlock_bh(&cipso_v4_cache[bkt].lock);
return 0;
}
if (prev_entry->activity > 0)
prev_entry->activity -= 1;
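			/* Bubble the matching entry up one slot once it has
			 * been noticeably more active than the entry ahead of
			 * it; the REORDERLIMIT gap keeps neighbouring entries
			 * from swapping back and forth on every lookup. */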
if (entry->activity > prev_entry->activity &&
entry->activity - prev_entry->activity >
CIPSO_V4_CACHE_REORDERLIMIT) {
__list_del(entry->list.prev, entry->list.next);
__list_add(&entry->list,
prev_entry->list.prev,
&prev_entry->list);
}
spin_unlock_bh(&cipso_v4_cache[bkt].lock);
return 0;
}
prev_entry = entry;
}
spin_unlock_bh(&cipso_v4_cache[bkt].lock);
return -ENOENT;
}
/**
* cipso_v4_cache_add - Add an entry to the CIPSO cache
* @cipso_ptr: pointer to CIPSO IP option
* @secattr: the packet's security attributes
*
* Description:
 * Add a new entry into the CIPSO label mapping cache.  Add the new entry to
 * the head of the cache bucket's list; if the cache bucket is out of room,
 * remove the last entry in the list first.  It is important to note that
 * there is currently no checking for duplicate keys.  Returns zero on
 * success, negative values on failure.
*
*/
int cipso_v4_cache_add(const unsigned char *cipso_ptr,
const struct netlbl_lsm_secattr *secattr)
{
int bkt_size = READ_ONCE(cipso_v4_cache_bucketsize);
int ret_val = -EPERM;
u32 bkt;
struct cipso_v4_map_cache_entry *entry = NULL;
struct cipso_v4_map_cache_entry *old_entry = NULL;
u32 cipso_ptr_len;
if (!READ_ONCE(cipso_v4_cache_enabled) || bkt_size <= 0)
return 0;
cipso_ptr_len = cipso_ptr[1];
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
if (!entry->key) {
ret_val = -ENOMEM;
goto cache_add_failure;
}
entry->key_len = cipso_ptr_len;
entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
refcount_inc(&secattr->cache->refcount);
entry->lsm_data = secattr->cache;
bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
spin_lock_bh(&cipso_v4_cache[bkt].lock);
if (cipso_v4_cache[bkt].size < bkt_size) {
list_add(&entry->list, &cipso_v4_cache[bkt].list);
cipso_v4_cache[bkt].size += 1;
} else {
old_entry = list_entry(cipso_v4_cache[bkt].list.prev,
struct cipso_v4_map_cache_entry, list);
list_del(&old_entry->list);
list_add(&entry->list, &cipso_v4_cache[bkt].list);
cipso_v4_cache_entry_free(old_entry);
}
spin_unlock_bh(&cipso_v4_cache[bkt].lock);
return 0;
cache_add_failure:
if (entry)
cipso_v4_cache_entry_free(entry);
return ret_val;
}
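/* Typical usage (sketch, not taken verbatim from any caller): a receive path
 * helper would try the cache first and only prime it after doing the full
 * mapping on a miss, e.g.
 *
 *	if (cipso_v4_cache_check(cipso_ptr, cipso_ptr[1], secattr) == 0)
 *		return 0;
 *	... do the full DOI/tag translation into *secattr ...
 *	cipso_v4_cache_add(cipso_ptr, secattr);
 */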
/*
* DOI List Functions
*/
/**
* cipso_v4_doi_search - Searches for a DOI definition
* @doi: the DOI to search for
*
* Description:
* Search the DOI definition list for a DOI definition with a DOI value that
* matches @doi. The caller is responsible for calling rcu_read_[un]lock().
* Returns a pointer to the DOI definition on success and NULL on failure.
*/
static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
{
struct cipso_v4_doi *iter;
list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
if (iter->doi == doi && refcount_read(&iter->refcount))
return iter;
return NULL;
}
/**
* cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine
* @doi_def: the DOI structure
* @audit_info: NetLabel audit information
*
* Description:
* The caller defines a new DOI for use by the CIPSO engine and calls this
* function to add it to the list of acceptable domains. The caller must
* ensure that the mapping table specified in @doi_def->map meets all of the
* requirements of the mapping type (see cipso_ipv4.h for details). Returns
* zero on success and non-zero on failure.
*
*/
int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
struct netlbl_audit *audit_info)
{
int ret_val = -EINVAL;
u32 iter;
u32 doi;
u32 doi_type;
struct audit_buffer *audit_buf;
doi = doi_def->doi;
doi_type = doi_def->type;
if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
goto doi_add_return;
for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
switch (doi_def->tags[iter]) {
case CIPSO_V4_TAG_RBITMAP:
break;
case CIPSO_V4_TAG_RANGE:
case CIPSO_V4_TAG_ENUM:
if (doi_def->type != CIPSO_V4_MAP_PASS)
goto doi_add_return;
break;
case CIPSO_V4_TAG_LOCAL:
if (doi_def->type != CIPSO_V4_MAP_LOCAL)
goto doi_add_return;
break;
case CIPSO_V4_TAG_INVALID:
if (iter == 0)
goto doi_add_return;
break;
default:
goto doi_add_return;
}
}
refcount_set(&doi_def->refcount, 1);
spin_lock(&cipso_v4_doi_list_lock);
if (cipso_v4_doi_search(doi_def->doi)) {
spin_unlock(&cipso_v4_doi_list_lock);
ret_val = -EEXIST;
goto doi_add_return;
}
list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
spin_unlock(&cipso_v4_doi_list_lock);
ret_val = 0;
doi_add_return:
audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
if (audit_buf) {
const char *type_str;
switch (doi_type) {
case CIPSO_V4_MAP_TRANS:
type_str = "trans";
break;
case CIPSO_V4_MAP_PASS:
type_str = "pass";
break;
case CIPSO_V4_MAP_LOCAL:
type_str = "local";
break;
default:
type_str = "(unknown)";
}
audit_log_format(audit_buf,
" cipso_doi=%u cipso_type=%s res=%u",
doi, type_str, ret_val == 0 ? 1 : 0);
audit_log_end(audit_buf);
}
return ret_val;
}
/**
* cipso_v4_doi_free - Frees a DOI definition
* @doi_def: the DOI definition
*
* Description:
* This function frees all of the memory associated with a DOI definition.
*
*/
void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
{
if (!doi_def)
return;
switch (doi_def->type) {
case CIPSO_V4_MAP_TRANS:
kfree(doi_def->map.std->lvl.cipso);
kfree(doi_def->map.std->lvl.local);
kfree(doi_def->map.std->cat.cipso);
kfree(doi_def->map.std->cat.local);
kfree(doi_def->map.std);
break;
}
kfree(doi_def);
}
/**
* cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer
* @entry: the entry's RCU field
*
* Description:
* This function is designed to be used as a callback to the call_rcu()
* function so that the memory allocated to the DOI definition can be released
* safely.
*
*/
static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
{
struct cipso_v4_doi *doi_def;
doi_def = container_of(entry, struct cipso_v4_doi, rcu);
cipso_v4_doi_free(doi_def);
}
/**
* cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine
* @doi: the DOI value
* @audit_info: NetLabel audit information
*
* Description:
* Removes a DOI definition from the CIPSO engine. The NetLabel routines will
* be called to release their own LSM domain mappings as well as our own
* domain list. Returns zero on success and negative values on failure.
*
*/
int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
{
int ret_val;
struct cipso_v4_doi *doi_def;
struct audit_buffer *audit_buf;
spin_lock(&cipso_v4_doi_list_lock);
doi_def = cipso_v4_doi_search(doi);
if (!doi_def) {
spin_unlock(&cipso_v4_doi_list_lock);
ret_val = -ENOENT;
goto doi_remove_return;
}
list_del_rcu(&doi_def->list);
spin_unlock(&cipso_v4_doi_list_lock);
cipso_v4_doi_putdef(doi_def);
ret_val = 0;
doi_remove_return:
audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
if (audit_buf) {
audit_log_format(audit_buf,
" cipso_doi=%u res=%u",
doi, ret_val == 0 ? 1 : 0);
audit_log_end(audit_buf);
}
return ret_val;
}
/**
* cipso_v4_doi_getdef - Returns a reference to a valid DOI definition
* @doi: the DOI value
*
* Description:
* Searches for a valid DOI definition and if one is found it is returned to
* the caller. Otherwise NULL is returned. The caller must ensure that
* rcu_read_lock() is held while accessing the returned definition and the DOI
* definition reference count is decremented when the caller is done.
*
*/
struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
{
struct cipso_v4_doi *doi_def;
rcu_read_lock();
doi_def = cipso_v4_doi_search(doi);
if (!doi_def)
goto doi_getdef_return;
if (!refcount_inc_not_zero(&doi_def->refcount))
doi_def = NULL;
doi_getdef_return:
rcu_read_unlock();
return doi_def;
}
/**
* cipso_v4_doi_putdef - Releases a reference for the given DOI definition
* @doi_def: the DOI definition
*
* Description:
* Releases a DOI definition reference obtained from cipso_v4_doi_getdef().
*
*/
void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
{
if (!doi_def)
return;
if (!refcount_dec_and_test(&doi_def->refcount))
return;
cipso_v4_cache_invalidate();
call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
}
/**
* cipso_v4_doi_walk - Iterate through the DOI definitions
* @skip_cnt: skip past this number of DOI definitions, updated
* @callback: callback for each DOI definition
* @cb_arg: argument for the callback function
*
* Description:
* Iterate over the DOI definition list, skipping the first @skip_cnt entries.
* For each entry call @callback, if @callback returns a negative value stop
* 'walking' through the list and return. Updates the value in @skip_cnt upon
* return. Returns zero on success, negative values on failure.
*
*/
int cipso_v4_doi_walk(u32 *skip_cnt,
int (*callback) (struct cipso_v4_doi *doi_def, void *arg),
void *cb_arg)
{
int ret_val = -ENOENT;
u32 doi_cnt = 0;
struct cipso_v4_doi *iter_doi;
rcu_read_lock();
list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list)
if (refcount_read(&iter_doi->refcount) > 0) {
if (doi_cnt++ < *skip_cnt)
continue;
ret_val = callback(iter_doi, cb_arg);
if (ret_val < 0) {
doi_cnt--;
goto doi_walk_return;
}
}
doi_walk_return:
rcu_read_unlock();
*skip_cnt = doi_cnt;
return ret_val;
}
/*
* Label Mapping Functions
*/
/**
* cipso_v4_map_lvl_valid - Checks to see if the given level is understood
* @doi_def: the DOI definition
* @level: the level to check
*
* Description:
* Checks the given level against the given DOI definition and returns a
* negative value if the level does not have a valid mapping and a zero value
* if the level is defined by the DOI.
*
*/
static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
{
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
return 0;
case CIPSO_V4_MAP_TRANS:
if ((level < doi_def->map.std->lvl.cipso_size) &&
(doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
return 0;
break;
}
return -EFAULT;
}
/**
* cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network
* @doi_def: the DOI definition
* @host_lvl: the host MLS level
* @net_lvl: the network/CIPSO MLS level
*
* Description:
* Perform a label mapping to translate a local MLS level to the correct
* CIPSO level using the given DOI definition. Returns zero on success,
* negative values otherwise.
*
*/
static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def,
u32 host_lvl,
u32 *net_lvl)
{
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
*net_lvl = host_lvl;
return 0;
case CIPSO_V4_MAP_TRANS:
if (host_lvl < doi_def->map.std->lvl.local_size &&
doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
*net_lvl = doi_def->map.std->lvl.local[host_lvl];
return 0;
}
return -EPERM;
}
return -EINVAL;
}
/**
* cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host
* @doi_def: the DOI definition
* @net_lvl: the network/CIPSO MLS level
* @host_lvl: the host MLS level
*
* Description:
* Perform a label mapping to translate a CIPSO level to the correct local MLS
* level using the given DOI definition. Returns zero on success, negative
* values otherwise.
*
*/
static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def,
u32 net_lvl,
u32 *host_lvl)
{
struct cipso_v4_std_map_tbl *map_tbl;
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
*host_lvl = net_lvl;
return 0;
case CIPSO_V4_MAP_TRANS:
map_tbl = doi_def->map.std;
if (net_lvl < map_tbl->lvl.cipso_size &&
map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) {
*host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
return 0;
}
return -EPERM;
}
return -EINVAL;
}
/**
* cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid
* @doi_def: the DOI definition
* @bitmap: category bitmap
* @bitmap_len: bitmap length in bytes
*
* Description:
* Checks the given category bitmap against the given DOI definition and
* returns a negative value if any of the categories in the bitmap do not have
* a valid mapping and a zero value if all of the categories are valid.
*
*/
static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def,
const unsigned char *bitmap,
u32 bitmap_len)
{
int cat = -1;
u32 bitmap_len_bits = bitmap_len * 8;
u32 cipso_cat_size;
u32 *cipso_array;
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
return 0;
case CIPSO_V4_MAP_TRANS:
cipso_cat_size = doi_def->map.std->cat.cipso_size;
cipso_array = doi_def->map.std->cat.cipso;
for (;;) {
cat = netlbl_bitmap_walk(bitmap,
bitmap_len_bits,
cat + 1,
1);
if (cat < 0)
break;
if (cat >= cipso_cat_size ||
cipso_array[cat] >= CIPSO_V4_INV_CAT)
return -EFAULT;
}
if (cat == -1)
return 0;
break;
}
return -EFAULT;
}
/**
* cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network
* @doi_def: the DOI definition
* @secattr: the security attributes
* @net_cat: the zero'd out category bitmap in network/CIPSO format
* @net_cat_len: the length of the CIPSO bitmap in bytes
*
* Description:
* Perform a label mapping to translate a local MLS category bitmap to the
* correct CIPSO bitmap using the given DOI definition. Returns the minimum
* size in bytes of the network bitmap on success, negative values otherwise.
*
*/
static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr,
unsigned char *net_cat,
u32 net_cat_len)
{
int host_spot = -1;
u32 net_spot = CIPSO_V4_INV_CAT;
u32 net_spot_max = 0;
u32 net_clen_bits = net_cat_len * 8;
u32 host_cat_size = 0;
u32 *host_cat_array = NULL;
if (doi_def->type == CIPSO_V4_MAP_TRANS) {
host_cat_size = doi_def->map.std->cat.local_size;
host_cat_array = doi_def->map.std->cat.local;
}
for (;;) {
host_spot = netlbl_catmap_walk(secattr->attr.mls.cat,
host_spot + 1);
if (host_spot < 0)
break;
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
net_spot = host_spot;
break;
case CIPSO_V4_MAP_TRANS:
if (host_spot >= host_cat_size)
return -EPERM;
net_spot = host_cat_array[host_spot];
if (net_spot >= CIPSO_V4_INV_CAT)
return -EPERM;
break;
}
if (net_spot >= net_clen_bits)
return -ENOSPC;
netlbl_bitmap_setbit(net_cat, net_spot, 1);
if (net_spot > net_spot_max)
net_spot_max = net_spot;
}
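	/* convert the highest category bit used into the number of octets
	 * needed for the bitmap, rounding up to a whole octet */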
if (++net_spot_max % 8)
return net_spot_max / 8 + 1;
return net_spot_max / 8;
}
/**
* cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host
* @doi_def: the DOI definition
* @net_cat: the category bitmap in network/CIPSO format
* @net_cat_len: the length of the CIPSO bitmap in bytes
* @secattr: the security attributes
*
* Description:
* Perform a label mapping to translate a CIPSO bitmap to the correct local
* MLS category bitmap using the given DOI definition. Returns zero on
* success, negative values on failure.
*
*/
static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
const unsigned char *net_cat,
u32 net_cat_len,
struct netlbl_lsm_secattr *secattr)
{
int ret_val;
int net_spot = -1;
u32 host_spot = CIPSO_V4_INV_CAT;
u32 net_clen_bits = net_cat_len * 8;
u32 net_cat_size = 0;
u32 *net_cat_array = NULL;
if (doi_def->type == CIPSO_V4_MAP_TRANS) {
net_cat_size = doi_def->map.std->cat.cipso_size;
net_cat_array = doi_def->map.std->cat.cipso;
}
for (;;) {
net_spot = netlbl_bitmap_walk(net_cat,
net_clen_bits,
net_spot + 1,
1);
if (net_spot < 0) {
if (net_spot == -2)
return -EFAULT;
return 0;
}
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
host_spot = net_spot;
break;
case CIPSO_V4_MAP_TRANS:
if (net_spot >= net_cat_size)
return -EPERM;
host_spot = net_cat_array[net_spot];
if (host_spot >= CIPSO_V4_INV_CAT)
return -EPERM;
break;
}
ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
host_spot,
GFP_ATOMIC);
if (ret_val != 0)
return ret_val;
}
return -EINVAL;
}
/**
* cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid
* @doi_def: the DOI definition
* @enumcat: category list
* @enumcat_len: length of the category list in bytes
*
* Description:
* Checks the given categories against the given DOI definition and returns a
* negative value if any of the categories do not have a valid mapping and a
* zero value if all of the categories are valid.
*
*/
static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
const unsigned char *enumcat,
u32 enumcat_len)
{
u16 cat;
int cat_prev = -1;
u32 iter;
if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01)
return -EFAULT;
for (iter = 0; iter < enumcat_len; iter += 2) {
cat = get_unaligned_be16(&enumcat[iter]);
if (cat <= cat_prev)
return -EFAULT;
cat_prev = cat;
}
return 0;
}
/**
* cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network
* @doi_def: the DOI definition
* @secattr: the security attributes
* @net_cat: the zero'd out category list in network/CIPSO format
* @net_cat_len: the length of the CIPSO category list in bytes
*
* Description:
* Perform a label mapping to translate a local MLS category bitmap to the
* correct CIPSO category list using the given DOI definition. Returns the
* size in bytes of the network category bitmap on success, negative values
* otherwise.
*
*/
static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr,
unsigned char *net_cat,
u32 net_cat_len)
{
int cat = -1;
u32 cat_iter = 0;
for (;;) {
cat = netlbl_catmap_walk(secattr->attr.mls.cat, cat + 1);
if (cat < 0)
break;
if ((cat_iter + 2) > net_cat_len)
return -ENOSPC;
*((__be16 *)&net_cat[cat_iter]) = htons(cat);
cat_iter += 2;
}
return cat_iter;
}
/**
* cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host
* @doi_def: the DOI definition
* @net_cat: the category list in network/CIPSO format
* @net_cat_len: the length of the CIPSO bitmap in bytes
* @secattr: the security attributes
*
* Description:
* Perform a label mapping to translate a CIPSO category list to the correct
* local MLS category bitmap using the given DOI definition. Returns zero on
* success, negative values on failure.
*
*/
static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
const unsigned char *net_cat,
u32 net_cat_len,
struct netlbl_lsm_secattr *secattr)
{
int ret_val;
u32 iter;
for (iter = 0; iter < net_cat_len; iter += 2) {
ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
get_unaligned_be16(&net_cat[iter]),
GFP_ATOMIC);
if (ret_val != 0)
return ret_val;
}
return 0;
}
/**
* cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid
* @doi_def: the DOI definition
* @rngcat: category list
* @rngcat_len: length of the category list in bytes
*
* Description:
* Checks the given categories against the given DOI definition and returns a
* negative value if any of the categories do not have a valid mapping and a
* zero value if all of the categories are valid.
*
*/
static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
const unsigned char *rngcat,
u32 rngcat_len)
{
u16 cat_high;
u16 cat_low;
u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
u32 iter;
if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
return -EFAULT;
for (iter = 0; iter < rngcat_len; iter += 4) {
cat_high = get_unaligned_be16(&rngcat[iter]);
if ((iter + 4) <= rngcat_len)
cat_low = get_unaligned_be16(&rngcat[iter + 2]);
else
cat_low = 0;
if (cat_high > cat_prev)
return -EFAULT;
cat_prev = cat_low;
}
return 0;
}
/**
* cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
* @doi_def: the DOI definition
* @secattr: the security attributes
* @net_cat: the zero'd out category list in network/CIPSO format
* @net_cat_len: the length of the CIPSO category list in bytes
*
* Description:
* Perform a label mapping to translate a local MLS category bitmap to the
* correct CIPSO category list using the given DOI definition. Returns the
* size in bytes of the network category bitmap on success, negative values
* otherwise.
*
*/
static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr,
unsigned char *net_cat,
u32 net_cat_len)
{
int iter = -1;
u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2];
u32 array_cnt = 0;
u32 cat_size = 0;
/* make sure we don't overflow the 'array[]' variable */
if (net_cat_len >
(CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN))
return -ENOSPC;
for (;;) {
iter = netlbl_catmap_walk(secattr->attr.mls.cat, iter + 1);
if (iter < 0)
break;
cat_size += (iter == 0 ? 0 : sizeof(u16));
if (cat_size > net_cat_len)
return -ENOSPC;
array[array_cnt++] = iter;
iter = netlbl_catmap_walkrng(secattr->attr.mls.cat, iter);
if (iter < 0)
return -EFAULT;
cat_size += sizeof(u16);
if (cat_size > net_cat_len)
return -ENOSPC;
array[array_cnt++] = iter;
}
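	/* array[] now holds the ranges as (low, high) pairs in increasing
	 * order; write them out highest range first as "high, low" pairs,
	 * omitting a low end of zero as the draft allows */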
for (iter = 0; array_cnt > 0;) {
*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
iter += 2;
array_cnt--;
if (array[array_cnt] != 0) {
*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
iter += 2;
}
}
return cat_size;
}
/**
* cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host
* @doi_def: the DOI definition
* @net_cat: the category list in network/CIPSO format
* @net_cat_len: the length of the CIPSO bitmap in bytes
* @secattr: the security attributes
*
* Description:
* Perform a label mapping to translate a CIPSO category list to the correct
* local MLS category bitmap using the given DOI definition. Returns zero on
* success, negative values on failure.
*
*/
static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
const unsigned char *net_cat,
u32 net_cat_len,
struct netlbl_lsm_secattr *secattr)
{
int ret_val;
u32 net_iter;
u16 cat_low;
u16 cat_high;
for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
cat_high = get_unaligned_be16(&net_cat[net_iter]);
if ((net_iter + 4) <= net_cat_len)
cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
else
cat_low = 0;
ret_val = netlbl_catmap_setrng(&secattr->attr.mls.cat,
cat_low,
cat_high,
GFP_ATOMIC);
if (ret_val != 0)
return ret_val;
}
return 0;
}
/*
* Protocol Handling Functions
*/
/**
* cipso_v4_gentag_hdr - Generate a CIPSO option header
* @doi_def: the DOI definition
 * @buf: the CIPSO option buffer
 * @len: the total tag length in bytes, not including this header
 *
 * Description:
 * Write a CIPSO header into the beginning of @buf.
*
*/
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
unsigned char *buf,
u32 len)
{
buf[0] = IPOPT_CIPSO;
buf[1] = CIPSO_V4_HDR_LEN + len;
put_unaligned_be32(doi_def->doi, &buf[2]);
}
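/* Illustrative example: for DOI 3 and a 10 byte tag the six header octets
 * would be 0x86 0x10 0x00 0x00 0x00 0x03 (IPOPT_CIPSO, a total option length
 * of 16, then the DOI in network byte order). */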
/**
* cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
* @doi_def: the DOI definition
* @secattr: the security attributes
* @buffer: the option buffer
* @buffer_len: length of buffer in bytes
*
* Description:
* Generate a CIPSO option using the restricted bitmap tag, tag type #1. The
* actual buffer length may be larger than the indicated size due to
* translation between host and network category bitmaps. Returns the size of
* the tag on success, negative values on failure.
*
*/
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr,
unsigned char *buffer,
u32 buffer_len)
{
int ret_val;
u32 tag_len;
u32 level;
if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
return -EPERM;
ret_val = cipso_v4_map_lvl_hton(doi_def,
secattr->attr.mls.lvl,
&level);
if (ret_val != 0)
return ret_val;
if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
secattr,
&buffer[4],
buffer_len - 4);
if (ret_val < 0)
return ret_val;
/* This will send packets using the "optimized" format when
* possible as specified in section 3.4.2.6 of the
* CIPSO draft. */
if (READ_ONCE(cipso_v4_rbm_optfmt) && ret_val > 0 &&
ret_val <= 10)
tag_len = 14;
else
tag_len = 4 + ret_val;
} else
tag_len = 4;
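	/* tag layout: type (1 octet), length (1 octet), an alignment octet
	 * which is left as zero from the memset() in cipso_v4_genopt(),
	 * the level (1 octet), then the category bitmap */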
buffer[0] = CIPSO_V4_TAG_RBITMAP;
buffer[1] = tag_len;
buffer[3] = level;
return tag_len;
}
/**
* cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag
* @doi_def: the DOI definition
* @tag: the CIPSO tag
* @secattr: the security attributes
*
* Description:
* Parse a CIPSO restricted bitmap tag (tag type #1) and return the security
 * attributes in @secattr. Return zero on success, negative values on
* failure.
*
*/
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
const unsigned char *tag,
struct netlbl_lsm_secattr *secattr)
{
int ret_val;
u8 tag_len = tag[1];
u32 level;
ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
if (ret_val != 0)
return ret_val;
secattr->attr.mls.lvl = level;
secattr->flags |= NETLBL_SECATTR_MLS_LVL;
if (tag_len > 4) {
ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
&tag[4],
tag_len - 4,
secattr);
if (ret_val != 0) {
netlbl_catmap_free(secattr->attr.mls.cat);
return ret_val;
}
if (secattr->attr.mls.cat)
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
return 0;
}
/**
* cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
* @doi_def: the DOI definition
* @secattr: the security attributes
* @buffer: the option buffer
* @buffer_len: length of buffer in bytes
*
* Description:
* Generate a CIPSO option using the enumerated tag, tag type #2. Returns the
* size of the tag on success, negative values on failure.
*
*/
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr,
unsigned char *buffer,
u32 buffer_len)
{
int ret_val;
u32 tag_len;
u32 level;
if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
return -EPERM;
ret_val = cipso_v4_map_lvl_hton(doi_def,
secattr->attr.mls.lvl,
&level);
if (ret_val != 0)
return ret_val;
if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
ret_val = cipso_v4_map_cat_enum_hton(doi_def,
secattr,
&buffer[4],
buffer_len - 4);
if (ret_val < 0)
return ret_val;
tag_len = 4 + ret_val;
} else
tag_len = 4;
buffer[0] = CIPSO_V4_TAG_ENUM;
buffer[1] = tag_len;
buffer[3] = level;
return tag_len;
}
/**
* cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
* @doi_def: the DOI definition
* @tag: the CIPSO tag
* @secattr: the security attributes
*
* Description:
* Parse a CIPSO enumerated tag (tag type #2) and return the security
 * attributes in @secattr. Return zero on success, negative values on
* failure.
*
*/
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
const unsigned char *tag,
struct netlbl_lsm_secattr *secattr)
{
int ret_val;
u8 tag_len = tag[1];
u32 level;
ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
if (ret_val != 0)
return ret_val;
secattr->attr.mls.lvl = level;
secattr->flags |= NETLBL_SECATTR_MLS_LVL;
if (tag_len > 4) {
ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
&tag[4],
tag_len - 4,
secattr);
if (ret_val != 0) {
netlbl_catmap_free(secattr->attr.mls.cat);
return ret_val;
}
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
return 0;
}
/**
* cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
* @doi_def: the DOI definition
* @secattr: the security attributes
* @buffer: the option buffer
* @buffer_len: length of buffer in bytes
*
* Description:
* Generate a CIPSO option using the ranged tag, tag type #5. Returns the
* size of the tag on success, negative values on failure.
*
*/
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr,
unsigned char *buffer,
u32 buffer_len)
{
int ret_val;
u32 tag_len;
u32 level;
if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
return -EPERM;
ret_val = cipso_v4_map_lvl_hton(doi_def,
secattr->attr.mls.lvl,
&level);
if (ret_val != 0)
return ret_val;
if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
ret_val = cipso_v4_map_cat_rng_hton(doi_def,
secattr,
&buffer[4],
buffer_len - 4);
if (ret_val < 0)
return ret_val;
tag_len = 4 + ret_val;
} else
tag_len = 4;
buffer[0] = CIPSO_V4_TAG_RANGE;
buffer[1] = tag_len;
buffer[3] = level;
return tag_len;
}
/**
* cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
* @doi_def: the DOI definition
* @tag: the CIPSO tag
* @secattr: the security attributes
*
* Description:
* Parse a CIPSO ranged tag (tag type #5) and return the security attributes
 * in @secattr. Return zero on success, negative values on failure.
*
*/
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
const unsigned char *tag,
struct netlbl_lsm_secattr *secattr)
{
int ret_val;
u8 tag_len = tag[1];
u32 level;
ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
if (ret_val != 0)
return ret_val;
secattr->attr.mls.lvl = level;
secattr->flags |= NETLBL_SECATTR_MLS_LVL;
if (tag_len > 4) {
ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
&tag[4],
tag_len - 4,
secattr);
if (ret_val != 0) {
netlbl_catmap_free(secattr->attr.mls.cat);
return ret_val;
}
if (secattr->attr.mls.cat)
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
return 0;
}
/**
* cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
* @doi_def: the DOI definition
* @secattr: the security attributes
* @buffer: the option buffer
* @buffer_len: length of buffer in bytes
*
* Description:
* Generate a CIPSO option using the local tag. Returns the size of the tag
* on success, negative values on failure.
*
*/
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr,
unsigned char *buffer,
u32 buffer_len)
{
if (!(secattr->flags & NETLBL_SECATTR_SECID))
return -EPERM;
buffer[0] = CIPSO_V4_TAG_LOCAL;
buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
*(u32 *)&buffer[2] = secattr->attr.secid;
return CIPSO_V4_TAG_LOC_BLEN;
}
/**
* cipso_v4_parsetag_loc - Parse a CIPSO local tag
* @doi_def: the DOI definition
* @tag: the CIPSO tag
* @secattr: the security attributes
*
* Description:
* Parse a CIPSO local tag and return the security attributes in @secattr.
 * Return zero on success, negative values on failure.
*
*/
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
const unsigned char *tag,
struct netlbl_lsm_secattr *secattr)
{
secattr->attr.secid = *(u32 *)&tag[2];
secattr->flags |= NETLBL_SECATTR_SECID;
return 0;
}
/**
* cipso_v4_optptr - Find the CIPSO option in the packet
* @skb: the packet
*
* Description:
* Parse the packet's IP header looking for a CIPSO option. Returns a pointer
* to the start of the CIPSO option on success, NULL if one is not found.
*
*/
unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
unsigned char *optptr = (unsigned char *)&(ip_hdr(skb)[1]);
int optlen;
int taglen;
for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
switch (optptr[0]) {
case IPOPT_END:
return NULL;
case IPOPT_NOOP:
taglen = 1;
break;
default:
taglen = optptr[1];
}
if (!taglen || taglen > optlen)
return NULL;
if (optptr[0] == IPOPT_CIPSO)
return optptr;
optlen -= taglen;
optptr += taglen;
}
return NULL;
}
/**
* cipso_v4_validate - Validate a CIPSO option
* @skb: the packet
* @option: the start of the option, on error it is set to point to the error
*
* Description:
* This routine is called to validate a CIPSO option, it checks all of the
* fields to ensure that they are at least valid, see the draft snippet below
* for details. If the option is valid then a zero value is returned and
* the value of @option is unchanged. If the option is invalid then a
* non-zero value is returned and @option is adjusted to point to the
* offending portion of the option. From the IETF draft ...
*
* "If any field within the CIPSO options, such as the DOI identifier, is not
* recognized the IP datagram is discarded and an ICMP 'parameter problem'
* (type 12) is generated and returned. The ICMP code field is set to 'bad
* parameter' (code 0) and the pointer is set to the start of the CIPSO field
* that is unrecognized."
*
*/
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
unsigned char *opt = *option;
unsigned char *tag;
unsigned char opt_iter;
unsigned char err_offset = 0;
u8 opt_len;
u8 tag_len;
struct cipso_v4_doi *doi_def = NULL;
u32 tag_iter;
/* caller already checks for length values that are too large */
opt_len = opt[1];
if (opt_len < 8) {
err_offset = 1;
goto validate_return;
}
rcu_read_lock();
doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
if (!doi_def) {
err_offset = 2;
goto validate_return_locked;
}
opt_iter = CIPSO_V4_HDR_LEN;
tag = opt + opt_iter;
while (opt_iter < opt_len) {
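		/* make sure the tag type is one of the tag types configured
		 * for this DOI; the inner loop scans doi_def->tags[] until it
		 * finds a match or runs out of valid entries */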
for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
++tag_iter == CIPSO_V4_TAG_MAXCNT) {
err_offset = opt_iter;
goto validate_return_locked;
}
if (opt_iter + 1 == opt_len) {
err_offset = opt_iter;
goto validate_return_locked;
}
tag_len = tag[1];
if (tag_len > (opt_len - opt_iter)) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
switch (tag[0]) {
case CIPSO_V4_TAG_RBITMAP:
if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
/* We are already going to do all the verification
* necessary at the socket layer so from our point of
* view it is safe to turn these checks off (and less
* work), however, the CIPSO draft says we should do
* all the CIPSO validations here but it doesn't
* really specify _exactly_ what we need to validate
* ... so, just make it a sysctl tunable. */
if (READ_ONCE(cipso_v4_rbm_strictvalid)) {
if (cipso_v4_map_lvl_valid(doi_def,
tag[3]) < 0) {
err_offset = opt_iter + 3;
goto validate_return_locked;
}
if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
cipso_v4_map_cat_rbm_valid(doi_def,
&tag[4],
tag_len - 4) < 0) {
err_offset = opt_iter + 4;
goto validate_return_locked;
}
}
break;
case CIPSO_V4_TAG_ENUM:
if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
if (cipso_v4_map_lvl_valid(doi_def,
tag[3]) < 0) {
err_offset = opt_iter + 3;
goto validate_return_locked;
}
if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
cipso_v4_map_cat_enum_valid(doi_def,
&tag[4],
tag_len - 4) < 0) {
err_offset = opt_iter + 4;
goto validate_return_locked;
}
break;
case CIPSO_V4_TAG_RANGE:
if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
if (cipso_v4_map_lvl_valid(doi_def,
tag[3]) < 0) {
err_offset = opt_iter + 3;
goto validate_return_locked;
}
if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
cipso_v4_map_cat_rng_valid(doi_def,
&tag[4],
tag_len - 4) < 0) {
err_offset = opt_iter + 4;
goto validate_return_locked;
}
break;
case CIPSO_V4_TAG_LOCAL:
/* This is a non-standard tag that we only allow for
* local connections, so if the incoming interface is
* not the loopback device drop the packet. Further,
* there is no legitimate reason for setting this from
* userspace so reject it if skb is NULL. */
if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
err_offset = opt_iter;
goto validate_return_locked;
}
if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
err_offset = opt_iter + 1;
goto validate_return_locked;
}
break;
default:
err_offset = opt_iter;
goto validate_return_locked;
}
tag += tag_len;
opt_iter += tag_len;
}
validate_return_locked:
rcu_read_unlock();
validate_return:
*option = opt + err_offset;
return err_offset;
}
/**
* cipso_v4_error - Send the correct response for a bad packet
* @skb: the packet
* @error: the error code
* @gateway: CIPSO gateway flag
*
* Description:
* Based on the error code given in @error, send an ICMP error message back to
* the originating host. From the IETF draft ...
*
* "If the contents of the CIPSO [option] are valid but the security label is
* outside of the configured host or port label range, the datagram is
* discarded and an ICMP 'destination unreachable' (type 3) is generated and
* returned. The code field of the ICMP is set to 'communication with
* destination network administratively prohibited' (code 9) or to
* 'communication with destination host administratively prohibited'
* (code 10). The value of the code is dependent on whether the originator
* of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The
* recipient of the ICMP message MUST be able to handle either value. The
* same procedure is performed if a CIPSO [option] can not be added to an
* IP packet because it is too large to fit in the IP options area."
*
* "If the error is triggered by receipt of an ICMP message, the message is
* discarded and no response is permitted (consistent with general ICMP
* processing rules)."
*
*/
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;
int res;
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
return;
/*
* We might be called above the IP layer,
* so we can not use icmp_send and IPCB here.
*/
memset(opt, 0, sizeof(struct ip_options));
opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
rcu_read_lock();
res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
rcu_read_unlock();
if (res)
return;
if (gateway)
__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
else
__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
}
/**
* cipso_v4_genopt - Generate a CIPSO option
* @buf: the option buffer
* @buf_len: the size of opt_buf
* @doi_def: the CIPSO DOI to use
* @secattr: the security attributes
*
* Description:
* Generate a CIPSO option using the DOI definition and security attributes
* passed to the function. Returns the length of the option on success and
* negative values on failure.
*
*/
static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val;
u32 iter;
if (buf_len <= CIPSO_V4_HDR_LEN)
return -ENOSPC;
	/* XXX - This code assumes only one tag per CIPSO option, which isn't
	 * really a good assumption to make, but since we only support the MAC
	 * tags right now it is a safe assumption. */
iter = 0;
do {
memset(buf, 0, buf_len);
switch (doi_def->tags[iter]) {
case CIPSO_V4_TAG_RBITMAP:
ret_val = cipso_v4_gentag_rbm(doi_def,
secattr,
&buf[CIPSO_V4_HDR_LEN],
buf_len - CIPSO_V4_HDR_LEN);
break;
case CIPSO_V4_TAG_ENUM:
ret_val = cipso_v4_gentag_enum(doi_def,
secattr,
&buf[CIPSO_V4_HDR_LEN],
buf_len - CIPSO_V4_HDR_LEN);
break;
case CIPSO_V4_TAG_RANGE:
ret_val = cipso_v4_gentag_rng(doi_def,
secattr,
&buf[CIPSO_V4_HDR_LEN],
buf_len - CIPSO_V4_HDR_LEN);
break;
case CIPSO_V4_TAG_LOCAL:
ret_val = cipso_v4_gentag_loc(doi_def,
secattr,
&buf[CIPSO_V4_HDR_LEN],
buf_len - CIPSO_V4_HDR_LEN);
break;
default:
return -EPERM;
}
iter++;
} while (ret_val < 0 &&
iter < CIPSO_V4_TAG_MAXCNT &&
doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
if (ret_val < 0)
return ret_val;
cipso_v4_gentag_hdr(doi_def, buf, ret_val);
return CIPSO_V4_HDR_LEN + ret_val;
}
/**
* cipso_v4_sock_setattr - Add a CIPSO option to a socket
* @sk: the socket
* @doi_def: the CIPSO DOI to use
* @secattr: the specific security attributes of the socket
*
* Description:
* Set the CIPSO option on the given socket using the DOI definition and
* security attributes passed to the function. This function requires
* exclusive access to @sk, which means it either needs to be in the
* process of being created or locked. Returns zero on success and negative
* values on failure.
*
*/
int cipso_v4_sock_setattr(struct sock *sk,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val = -EPERM;
unsigned char *buf = NULL;
u32 buf_len;
u32 opt_len;
struct ip_options_rcu *old, *opt = NULL;
struct inet_sock *sk_inet;
struct inet_connection_sock *sk_conn;
/* In the case of sock_create_lite(), the sock->sk field is not
* defined yet but it is not a problem as the only users of these
* "lite" PF_INET sockets are functions which do an accept() call
* afterwards so we will label the socket as part of the accept(). */
if (!sk)
return 0;
/* We allocate the maximum CIPSO option size here so we are probably
* being a little wasteful, but it makes our life _much_ easier later
* on and after all we are only talking about 40 bytes. */
buf_len = CIPSO_V4_OPT_LEN_MAX;
buf = kmalloc(buf_len, GFP_ATOMIC);
if (!buf) {
ret_val = -ENOMEM;
goto socket_setattr_failure;
}
ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
if (ret_val < 0)
goto socket_setattr_failure;
buf_len = ret_val;
/* We can't use ip_options_get() directly because it makes a call to
* ip_options_get_alloc() which allocates memory with GFP_KERNEL and
* we won't always have CAP_NET_RAW even though we _always_ want to
* set the IPOPT_CIPSO option. */
opt_len = (buf_len + 3) & ~3;
opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
if (!opt) {
ret_val = -ENOMEM;
goto socket_setattr_failure;
}
memcpy(opt->opt.__data, buf, buf_len);
opt->opt.optlen = opt_len;
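	/* opt.cipso holds the offset of the CIPSO option from the start of
	 * the IP header; with no other options present it begins right after
	 * the base header */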
opt->opt.cipso = sizeof(struct iphdr);
kfree(buf);
buf = NULL;
sk_inet = inet_sk(sk);
old = rcu_dereference_protected(sk_inet->inet_opt,
lockdep_sock_is_held(sk));
if (inet_test_bit(IS_ICSK, sk)) {
sk_conn = inet_csk(sk);
if (old)
sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
sk_conn->icsk_ext_hdr_len += opt->opt.optlen;
sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
}
rcu_assign_pointer(sk_inet->inet_opt, opt);
if (old)
kfree_rcu(old, rcu);
return 0;
socket_setattr_failure:
kfree(buf);
kfree(opt);
return ret_val;
}
/**
* cipso_v4_req_setattr - Add a CIPSO option to a connection request socket
* @req: the connection request socket
* @doi_def: the CIPSO DOI to use
* @secattr: the specific security attributes of the socket
*
* Description:
* Set the CIPSO option on the given socket using the DOI definition and
* security attributes passed to the function. Returns zero on success and
* negative values on failure.
*
*/
int cipso_v4_req_setattr(struct request_sock *req,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val = -EPERM;
unsigned char *buf = NULL;
u32 buf_len;
u32 opt_len;
struct ip_options_rcu *opt = NULL;
struct inet_request_sock *req_inet;
/* We allocate the maximum CIPSO option size here so we are probably
* being a little wasteful, but it makes our life _much_ easier later
* on and after all we are only talking about 40 bytes. */
buf_len = CIPSO_V4_OPT_LEN_MAX;
buf = kmalloc(buf_len, GFP_ATOMIC);
if (!buf) {
ret_val = -ENOMEM;
goto req_setattr_failure;
}
ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
if (ret_val < 0)
goto req_setattr_failure;
buf_len = ret_val;
/* We can't use ip_options_get() directly because it makes a call to
* ip_options_get_alloc() which allocates memory with GFP_KERNEL and
* we won't always have CAP_NET_RAW even though we _always_ want to
* set the IPOPT_CIPSO option. */
opt_len = (buf_len + 3) & ~3;
opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
if (!opt) {
ret_val = -ENOMEM;
goto req_setattr_failure;
}
memcpy(opt->opt.__data, buf, buf_len);
opt->opt.optlen = opt_len;
opt->opt.cipso = sizeof(struct iphdr);
kfree(buf);
buf = NULL;
req_inet = inet_rsk(req);
opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
if (opt)
kfree_rcu(opt, rcu);
return 0;
req_setattr_failure:
kfree(buf);
kfree(opt);
return ret_val;
}
/**
* cipso_v4_delopt - Delete the CIPSO option from a set of IP options
* @opt_ptr: IP option pointer
*
* Description:
* Deletes the CIPSO IP option from a set of IP options and makes the necessary
* adjustments to the IP option structure. Returns zero on success, negative
* values on failure.
*
*/
static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
{
struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
int hdr_delta = 0;
if (!opt || opt->opt.cipso == 0)
return 0;
if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
u8 cipso_len;
u8 cipso_off;
unsigned char *cipso_ptr;
int iter;
int optlen_new;
cipso_off = opt->opt.cipso - sizeof(struct iphdr);
cipso_ptr = &opt->opt.__data[cipso_off];
cipso_len = cipso_ptr[1];
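		/* every option stored after the CIPSO option in __data[] will
		 * move toward the front of the buffer by cipso_len bytes when
		 * the memmove() below removes the CIPSO bytes, so adjust the
		 * recorded offsets to match */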
if (opt->opt.srr > opt->opt.cipso)
opt->opt.srr -= cipso_len;
if (opt->opt.rr > opt->opt.cipso)
opt->opt.rr -= cipso_len;
if (opt->opt.ts > opt->opt.cipso)
opt->opt.ts -= cipso_len;
if (opt->opt.router_alert > opt->opt.cipso)
opt->opt.router_alert -= cipso_len;
opt->opt.cipso = 0;
memmove(cipso_ptr, cipso_ptr + cipso_len,
opt->opt.optlen - cipso_off - cipso_len);
		/* determining the new total option length is tricky because of
		 * the padding necessary; the only thing I can think to do at
		 * this point is walk the options one-by-one, skipping the
		 * padding at the end to determine the actual option size and
		 * from there we can determine the new total option length */
iter = 0;
optlen_new = 0;
while (iter < opt->opt.optlen)
if (opt->opt.__data[iter] != IPOPT_NOP) {
iter += opt->opt.__data[iter + 1];
optlen_new = iter;
} else
iter++;
hdr_delta = opt->opt.optlen;
opt->opt.optlen = (optlen_new + 3) & ~3;
hdr_delta -= opt->opt.optlen;
} else {
/* only the cipso option was present on the socket so we can
* remove the entire option struct */
*opt_ptr = NULL;
hdr_delta = opt->opt.optlen;
kfree_rcu(opt, rcu);
}
return hdr_delta;
}
/**
* cipso_v4_sock_delattr - Delete the CIPSO option from a socket
* @sk: the socket
*
* Description:
* Removes the CIPSO option from a socket, if present.
*
*/
void cipso_v4_sock_delattr(struct sock *sk)
{
struct inet_sock *sk_inet;
int hdr_delta;
sk_inet = inet_sk(sk);
hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
if (inet_test_bit(IS_ICSK, sk) && hdr_delta > 0) {
struct inet_connection_sock *sk_conn = inet_csk(sk);
sk_conn->icsk_ext_hdr_len -= hdr_delta;
sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
}
}
/**
* cipso_v4_req_delattr - Delete the CIPSO option from a request socket
* @req: the request socket
*
* Description:
* Removes the CIPSO option from a request socket, if present.
*
*/
void cipso_v4_req_delattr(struct request_sock *req)
{
cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
}
/**
* cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions
* @cipso: the CIPSO v4 option
* @secattr: the security attributes
*
* Description:
* Inspect @cipso and return the security attributes in @secattr. Returns zero
* on success and negative values on failure.
*
*/
int cipso_v4_getattr(const unsigned char *cipso,
struct netlbl_lsm_secattr *secattr)
{
int ret_val = -ENOMSG;
u32 doi;
struct cipso_v4_doi *doi_def;
if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
return 0;
doi = get_unaligned_be32(&cipso[2]);
rcu_read_lock();
doi_def = cipso_v4_doi_search(doi);
if (!doi_def)
goto getattr_return;
	/* XXX - This code assumes only one tag per CIPSO option, which isn't
	 * really a good assumption to make, but since we only support the MAC
	 * tags right now it is a safe assumption. */
switch (cipso[6]) {
case CIPSO_V4_TAG_RBITMAP:
ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
break;
case CIPSO_V4_TAG_ENUM:
ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
break;
case CIPSO_V4_TAG_RANGE:
ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
break;
case CIPSO_V4_TAG_LOCAL:
ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
break;
}
if (ret_val == 0)
secattr->type = NETLBL_NLTYPE_CIPSOV4;
getattr_return:
rcu_read_unlock();
return ret_val;
}
/**
* cipso_v4_sock_getattr - Get the security attributes from a sock
* @sk: the sock
* @secattr: the security attributes
*
* Description:
* Query @sk to see if there is a CIPSO option attached to the sock and if
* there is return the CIPSO security attributes in @secattr. This function
* requires that @sk be locked, or privately held, but it does not do any
* locking itself. Returns zero on success and negative values on failure.
*
*/
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
{
struct ip_options_rcu *opt;
int res = -ENOMSG;
rcu_read_lock();
opt = rcu_dereference(inet_sk(sk)->inet_opt);
if (opt && opt->opt.cipso)
res = cipso_v4_getattr(opt->opt.__data +
opt->opt.cipso -
sizeof(struct iphdr),
secattr);
rcu_read_unlock();
return res;
}
/**
* cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
* @skb: the packet
* @doi_def: the DOI structure
* @secattr: the security attributes
*
* Description:
* Set the CIPSO option on the given packet based on the security attributes.
 * Returns zero on success and negative values on failure.
*
*/
int cipso_v4_skbuff_setattr(struct sk_buff *skb,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val;
struct iphdr *iph;
struct ip_options *opt = &IPCB(skb)->opt;
unsigned char buf[CIPSO_V4_OPT_LEN_MAX];
u32 buf_len = CIPSO_V4_OPT_LEN_MAX;
u32 opt_len;
int len_delta;
ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
if (ret_val < 0)
return ret_val;
buf_len = ret_val;
opt_len = (buf_len + 3) & ~3;
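	/* IP options must be padded out to a multiple of four bytes */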
/* we overwrite any existing options to ensure that we have enough
* room for the CIPSO option, the reason is that we _need_ to guarantee
* that the security label is applied to the packet - we do the same
* thing when using the socket options and it hasn't caused a problem,
* if we need to we can always revisit this choice later */
len_delta = opt_len - opt->optlen;
/* if we don't ensure enough headroom we could panic on the skb_push()
* call below so make sure we have enough, we are also "mangling" the
* packet so we should probably do a copy-on-write call anyway */
ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
if (ret_val < 0)
return ret_val;
if (len_delta > 0) {
/* we assume that the header + opt->optlen have already been
* "pushed" in ip_options_build() or similar */
iph = ip_hdr(skb);
skb_push(skb, len_delta);
memmove((char *)iph - len_delta, iph, iph->ihl << 2);
skb_reset_network_header(skb);
iph = ip_hdr(skb);
} else if (len_delta < 0) {
iph = ip_hdr(skb);
memset(iph + 1, IPOPT_NOP, opt->optlen);
} else
iph = ip_hdr(skb);
if (opt->optlen > 0)
memset(opt, 0, sizeof(*opt));
opt->optlen = opt_len;
opt->cipso = sizeof(struct iphdr);
opt->is_changed = 1;
/* we have to do the following because we are being called from a
* netfilter hook which means the packet already has had the header
* fields populated and the checksum calculated - yes this means we
* are doing more work than needed but we do it to keep the core
* stack clean and tidy */
memcpy(iph + 1, buf, buf_len);
if (opt_len > buf_len)
memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len);
if (len_delta != 0) {
iph->ihl = 5 + (opt_len >> 2);
iph_set_totlen(iph, skb->len);
}
ip_send_check(iph);
return 0;
}
/**
* cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet
* @skb: the packet
*
* Description:
* Removes any and all CIPSO options from the given packet. Returns zero on
* success, negative values on failure.
*
*/
int cipso_v4_skbuff_delattr(struct sk_buff *skb)
{
int ret_val;
struct iphdr *iph;
struct ip_options *opt = &IPCB(skb)->opt;
unsigned char *cipso_ptr;
if (opt->cipso == 0)
return 0;
/* since we are changing the packet we should make a copy */
ret_val = skb_cow(skb, skb_headroom(skb));
if (ret_val < 0)
return ret_val;
/* the easiest thing to do is just replace the cipso option with noop
* options since we don't change the size of the packet, although we
* still need to recalculate the checksum */
iph = ip_hdr(skb);
cipso_ptr = (unsigned char *)iph + opt->cipso;
memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
opt->cipso = 0;
opt->is_changed = 1;
ip_send_check(iph);
return 0;
}
/*
* Setup Functions
*/
/**
* cipso_v4_init - Initialize the CIPSO module
*
* Description:
* Initialize the CIPSO module and prepare it for use. Returns zero on success
* and negative values on failure.
*
*/
static int __init cipso_v4_init(void)
{
int ret_val;
ret_val = cipso_v4_cache_init();
if (ret_val != 0)
panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
ret_val);
return 0;
}
subsys_initcall(cipso_v4_init);
| linux-master | net/ipv4/cipso_ipv4.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
const struct inetpeer_addr *daddr,
struct net *net, unsigned int hash);
struct tcp_fastopen_metrics {
u16 mss;
u16 syn_loss:10, /* Recurring Fast Open SYN losses */
try_exp:2; /* Request w/ exp. option (once) */
unsigned long last_syn_loss; /* Last Fast Open SYN loss */
struct tcp_fastopen_cookie cookie;
};
/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
* Kernel only stores RTT and RTTVAR in usec resolution
*/
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
struct tcp_metrics_block {
struct tcp_metrics_block __rcu *tcpm_next;
struct net *tcpm_net;
struct inetpeer_addr tcpm_saddr;
struct inetpeer_addr tcpm_daddr;
unsigned long tcpm_stamp;
u32 tcpm_lock;
u32 tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
struct tcp_fastopen_metrics tcpm_fastopen;
struct rcu_head rcu_head;
};
static inline struct net *tm_net(const struct tcp_metrics_block *tm)
{
/* Paired with the WRITE_ONCE() in tcpm_new() */
return READ_ONCE(tm->tcpm_net);
}
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
enum tcp_metric_index idx)
{
/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}
static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
enum tcp_metric_index idx)
{
/* Paired with WRITE_ONCE() in tcp_metric_set() */
return READ_ONCE(tm->tcpm_vals[idx]);
}
static void tcp_metric_set(struct tcp_metrics_block *tm,
enum tcp_metric_index idx,
u32 val)
{
/* Paired with READ_ONCE() in tcp_metric_get() */
WRITE_ONCE(tm->tcpm_vals[idx], val);
}
static bool addr_same(const struct inetpeer_addr *a,
const struct inetpeer_addr *b)
{
return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}
struct tcpm_hash_bucket {
struct tcp_metrics_block __rcu *chain;
};
static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly;
static unsigned int tcp_metrics_hash_log __read_mostly;
static DEFINE_SPINLOCK(tcp_metrics_lock);
static DEFINE_SEQLOCK(fastopen_seqlock);
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
const struct dst_entry *dst,
bool fastopen_clear)
{
u32 msval;
u32 val;
WRITE_ONCE(tm->tcpm_stamp, jiffies);
val = 0;
if (dst_metric_locked(dst, RTAX_RTT))
val |= 1 << TCP_METRIC_RTT;
if (dst_metric_locked(dst, RTAX_RTTVAR))
val |= 1 << TCP_METRIC_RTTVAR;
if (dst_metric_locked(dst, RTAX_SSTHRESH))
val |= 1 << TCP_METRIC_SSTHRESH;
if (dst_metric_locked(dst, RTAX_CWND))
val |= 1 << TCP_METRIC_CWND;
if (dst_metric_locked(dst, RTAX_REORDERING))
val |= 1 << TCP_METRIC_REORDERING;
/* Paired with READ_ONCE() in tcp_metric_locked() */
WRITE_ONCE(tm->tcpm_lock, val);
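	/* the raw dst metrics keep RTT/RTTVAR in milliseconds (hence the
	 * name "msval"); this cache stores them in microseconds */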
msval = dst_metric_raw(dst, RTAX_RTT);
tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
msval = dst_metric_raw(dst, RTAX_RTTVAR);
tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
dst_metric_raw(dst, RTAX_SSTHRESH));
tcp_metric_set(tm, TCP_METRIC_CWND,
dst_metric_raw(dst, RTAX_CWND));
tcp_metric_set(tm, TCP_METRIC_REORDERING,
dst_metric_raw(dst, RTAX_REORDERING));
if (fastopen_clear) {
write_seqlock(&fastopen_seqlock);
tm->tcpm_fastopen.mss = 0;
tm->tcpm_fastopen.syn_loss = 0;
tm->tcpm_fastopen.try_exp = 0;
tm->tcpm_fastopen.cookie.exp = false;
tm->tcpm_fastopen.cookie.len = 0;
write_sequnlock(&fastopen_seqlock);
}
}
#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
static void tcpm_check_stamp(struct tcp_metrics_block *tm,
const struct dst_entry *dst)
{
unsigned long limit;
if (!tm)
return;
limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
if (unlikely(time_after(jiffies, limit)))
tcpm_suck_dst(tm, dst, false);
}
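/* If a bucket's chain walk passes more than TCP_METRICS_RECLAIM_DEPTH entries
 * without a match, __tcp_get_metrics() returns the TCP_METRICS_RECLAIM_PTR
 * sentinel instead of NULL, telling tcpm_new() to recycle the oldest entry in
 * that bucket rather than let the chain keep growing.
 */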
#define TCP_METRICS_RECLAIM_DEPTH 5
#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
#define deref_locked(p) \
rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
struct inetpeer_addr *saddr,
struct inetpeer_addr *daddr,
unsigned int hash)
{
struct tcp_metrics_block *tm;
struct net *net;
bool reclaim = false;
spin_lock_bh(&tcp_metrics_lock);
net = dev_net(dst->dev);
/* While waiting for the spin-lock the cache might have been populated
* with this entry and so we have to check again.
*/
tm = __tcp_get_metrics(saddr, daddr, net, hash);
if (tm == TCP_METRICS_RECLAIM_PTR) {
reclaim = true;
tm = NULL;
}
if (tm) {
tcpm_check_stamp(tm, dst);
goto out_unlock;
}
if (unlikely(reclaim)) {
struct tcp_metrics_block *oldest;
oldest = deref_locked(tcp_metrics_hash[hash].chain);
for (tm = deref_locked(oldest->tcpm_next); tm;
tm = deref_locked(tm->tcpm_next)) {
if (time_before(READ_ONCE(tm->tcpm_stamp),
READ_ONCE(oldest->tcpm_stamp)))
oldest = tm;
}
tm = oldest;
} else {
tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
if (!tm)
goto out_unlock;
}
/* Paired with the READ_ONCE() in tm_net() */
WRITE_ONCE(tm->tcpm_net, net);
tm->tcpm_saddr = *saddr;
tm->tcpm_daddr = *daddr;
tcpm_suck_dst(tm, dst, reclaim);
if (likely(!reclaim)) {
tm->tcpm_next = tcp_metrics_hash[hash].chain;
rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
}
out_unlock:
spin_unlock_bh(&tcp_metrics_lock);
return tm;
}
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
if (tm)
return tm;
if (depth > TCP_METRICS_RECLAIM_DEPTH)
return TCP_METRICS_RECLAIM_PTR;
return NULL;
}
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
const struct inetpeer_addr *daddr,
struct net *net, unsigned int hash)
{
struct tcp_metrics_block *tm;
int depth = 0;
for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
if (addr_same(&tm->tcpm_saddr, saddr) &&
addr_same(&tm->tcpm_daddr, daddr) &&
net_eq(tm_net(tm), net))
break;
depth++;
}
return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
struct dst_entry *dst)
{
struct tcp_metrics_block *tm;
struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct net *net;
saddr.family = req->rsk_ops->family;
daddr.family = req->rsk_ops->family;
switch (daddr.family) {
case AF_INET:
inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
break;
#endif
default:
return NULL;
}
net = dev_net(dst->dev);
hash ^= net_hash_mix(net);
hash = hash_32(hash, tcp_metrics_hash_log);
for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
if (addr_same(&tm->tcpm_saddr, &saddr) &&
addr_same(&tm->tcpm_daddr, &daddr) &&
net_eq(tm_net(tm), net))
break;
}
tcpm_check_stamp(tm, dst);
return tm;
}
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
struct dst_entry *dst,
bool create)
{
struct tcp_metrics_block *tm;
struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct net *net;
if (sk->sk_family == AF_INET) {
inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
} else {
inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
hash = ipv6_addr_hash(&sk->sk_v6_daddr);
}
}
#endif
else
return NULL;
net = dev_net(dst->dev);
hash ^= net_hash_mix(net);
hash = hash_32(hash, tcp_metrics_hash_log);
tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
if (tm == TCP_METRICS_RECLAIM_PTR)
tm = NULL;
if (!tm && create)
tm = tcpm_new(dst, &saddr, &daddr, hash);
else
tcpm_check_stamp(tm, dst);
return tm;
}
/* Save metrics learned by this TCP session. This function is called
* only when TCP finishes successfully, i.e. when it enters TIME-WAIT
* or goes from LAST-ACK to CLOSE.
*/
void tcp_update_metrics(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct tcp_metrics_block *tm;
unsigned long rtt;
u32 val;
int m;
sk_dst_confirm(sk);
if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
return;
rcu_read_lock();
if (icsk->icsk_backoff || !tp->srtt_us) {
/* This session failed to estimate rtt. Why?
* Probably, no packets returned in time. Reset our
* results.
*/
tm = tcp_get_metrics(sk, dst, false);
if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
tcp_metric_set(tm, TCP_METRIC_RTT, 0);
goto out_unlock;
} else
tm = tcp_get_metrics(sk, dst, true);
if (!tm)
goto out_unlock;
rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
m = rtt - tp->srtt_us;
/* If the newly calculated rtt is larger than the stored one, store the
* new one. Otherwise, use EWMA. Remember, rtt overestimation is
* always better than underestimation.
*/
if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
if (m <= 0)
rtt = tp->srtt_us;
else
rtt -= (m >> 3);
tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
}
if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
unsigned long var;
if (m < 0)
m = -m;
/* Scale deviation to rttvar fixed point */
m >>= 1;
if (m < tp->mdev_us)
m = tp->mdev_us;
var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
if (m >= var)
var = m;
else
var -= (var - m) >> 2;
tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
}
if (tcp_in_initial_slowstart(tp)) {
/* Slow start still did not finish. */
if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
if (val && (tcp_snd_cwnd(tp) >> 1) > val)
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
tcp_snd_cwnd(tp) >> 1);
}
if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
val = tcp_metric_get(tm, TCP_METRIC_CWND);
if (tcp_snd_cwnd(tp) > val)
tcp_metric_set(tm, TCP_METRIC_CWND,
tcp_snd_cwnd(tp));
}
} else if (!tcp_in_slow_start(tp) &&
icsk->icsk_ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
val = tcp_metric_get(tm, TCP_METRIC_CWND);
tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
}
} else {
/* Else slow start did not finish, cwnd is non-sense,
* ssthresh may be also invalid.
*/
if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
val = tcp_metric_get(tm, TCP_METRIC_CWND);
tcp_metric_set(tm, TCP_METRIC_CWND,
(val + tp->snd_ssthresh) >> 1);
}
if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
if (val && tp->snd_ssthresh > val)
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
tp->snd_ssthresh);
}
if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
if (val < tp->reordering &&
tp->reordering !=
READ_ONCE(net->ipv4.sysctl_tcp_reordering))
tcp_metric_set(tm, TCP_METRIC_REORDERING,
tp->reordering);
}
}
WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
rcu_read_unlock();
}
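/* Illustrative, standalone userspace sketch (not part of the original file):
 * the cached-RTT update above replaces the stored value outright when the new
 * sample is at least as large, and otherwise moves it 1/8 of the way toward
 * the sample. The helper name and the sample values below are assumptions
 * chosen only for demonstration; build with any C compiler to experiment.
 */
#include <stdio.h>
static unsigned int cached_rtt_update(unsigned int cached_us, unsigned int sample_us)
{
	long m = (long)cached_us - (long)sample_us;
	if (m <= 0)			/* sample is at least as large: take it as-is */
		return sample_us;
	return cached_us - (unsigned int)(m >> 3);	/* decay toward the sample */
}
int main(void)
{
	unsigned int rtt = 100000;	/* cached value: 100 ms, in usec */
	rtt = cached_rtt_update(rtt, 60000);		/* smaller sample */
	printf("after 60 ms sample: %u us\n", rtt);	/* prints 95000 */
	rtt = cached_rtt_update(rtt, 200000);		/* larger sample */
	printf("after 200 ms sample: %u us\n", rtt);	/* prints 200000 */
	return 0;
}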
/* Initialize metrics on socket. */
void tcp_init_metrics(struct sock *sk)
{
struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct tcp_metrics_block *tm;
u32 val, crtt = 0; /* cached RTT scaled by 8 */
sk_dst_confirm(sk);
if (!dst)
goto reset;
rcu_read_lock();
tm = tcp_get_metrics(sk, dst, true);
if (!tm) {
rcu_read_unlock();
goto reset;
}
if (tcp_metric_locked(tm, TCP_METRIC_CWND))
tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
if (val) {
tp->snd_ssthresh = val;
if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
tp->snd_ssthresh = tp->snd_cwnd_clamp;
} else {
/* ssthresh may have been reduced unnecessarily during the
* 3WHS. Restore it back to its initial default.
*/
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
}
val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
if (val && tp->reordering != val)
tp->reordering = val;
crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
rcu_read_unlock();
reset:
/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
* to seed the RTO for later data packets because SYN packets are
* small. Use the per-dst cached values to seed the RTO but keep
* the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
* Later the RTO will be updated immediately upon obtaining the first
* data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
* influences the first RTO but not later RTT estimation.
*
* But if RTT is not available from the SYN (due to retransmits or
* syn cookies) or the cache, force a conservative 3secs timeout.
*
* A bit of theory. RTT is the time from when a "normal" sized packet
* is sent until it is ACKed. In normal circumstances sending small
* packets forces the peer to delay ACKs, so the calculation is still
* correct. The algorithm is adaptive and, provided we follow the specs,
* it NEVER underestimates RTT. BUT! If the peer plays clever tricks,
* such as sending "quick acks" for long enough to drive RTT down to a
* low value and then abruptly switching back to delayed ACKs, expect
* trouble.
*/
if (crtt > tp->srtt_us) {
/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
crtt /= 8 * USEC_PER_SEC / HZ;
inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
} else if (tp->srtt_us == 0) {
/* RFC6298: 5.7 We've failed to get a valid RTT sample from
* 3WHS. This is most likely due to retransmission,
* including spurious one. Reset the RTO back to 3secs
* from the more aggressive 1sec to avoid more spurious
* retransmission.
*/
tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
}
}
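/* Illustrative, standalone userspace sketch (not part of the original file):
 * seeding the first RTO from a cached RTT that is stored scaled by 8 in usec,
 * as in the branch above. HZ and the RTO floor are assumptions picked for the
 * example (HZ=1000, a 200 ms minimum), not values read from a running kernel.
 */
#include <stdio.h>
#define HZ		1000
#define USEC_PER_SEC	1000000UL
int main(void)
{
	unsigned long crtt = 8 * 50000;		/* cached srtt: 50 ms, scaled by 8, in usec */
	unsigned long rto_min = 200;		/* assumed floor, in jiffies at HZ=1000 */
	unsigned long rto;
	crtt /= 8 * USEC_PER_SEC / HZ;		/* scaled usec -> jiffies */
	rto = crtt + (2 * crtt > rto_min ? 2 * crtt : rto_min);
	printf("seeded RTO: %lu jiffies (~%lu ms)\n", rto, rto * 1000 / HZ);	/* 250 */
	return 0;
}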
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
struct tcp_metrics_block *tm;
bool ret;
if (!dst)
return false;
rcu_read_lock();
tm = __tcp_get_metrics_req(req, dst);
if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
ret = true;
else
ret = false;
rcu_read_unlock();
return ret;
}
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie)
{
struct tcp_metrics_block *tm;
rcu_read_lock();
tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
if (tm) {
struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
unsigned int seq;
do {
seq = read_seqbegin(&fastopen_seqlock);
if (tfom->mss)
*mss = tfom->mss;
*cookie = tfom->cookie;
if (cookie->len <= 0 && tfom->try_exp == 1)
cookie->exp = true;
} while (read_seqretry(&fastopen_seqlock, seq));
}
rcu_read_unlock();
}
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
struct tcp_fastopen_cookie *cookie, bool syn_lost,
u16 try_exp)
{
struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_metrics_block *tm;
if (!dst)
return;
rcu_read_lock();
tm = tcp_get_metrics(sk, dst, true);
if (tm) {
struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
write_seqlock_bh(&fastopen_seqlock);
if (mss)
tfom->mss = mss;
if (cookie && cookie->len > 0)
tfom->cookie = *cookie;
else if (try_exp > tfom->try_exp &&
tfom->cookie.len <= 0 && !tfom->cookie.exp)
tfom->try_exp = try_exp;
if (syn_lost) {
++tfom->syn_loss;
tfom->last_syn_loss = jiffies;
} else
tfom->syn_loss = 0;
write_sequnlock_bh(&fastopen_seqlock);
}
rcu_read_unlock();
}
static struct genl_family tcp_metrics_nl_family;
static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
[TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, },
[TCP_METRICS_ATTR_ADDR_IPV6] = { .type = NLA_BINARY,
.len = sizeof(struct in6_addr), },
/* Following attributes are not received for GET/DEL,
* we keep them for reference
*/
#if 0
[TCP_METRICS_ATTR_AGE] = { .type = NLA_MSECS, },
[TCP_METRICS_ATTR_TW_TSVAL] = { .type = NLA_U32, },
[TCP_METRICS_ATTR_TW_TS_STAMP] = { .type = NLA_S32, },
[TCP_METRICS_ATTR_VALS] = { .type = NLA_NESTED, },
[TCP_METRICS_ATTR_FOPEN_MSS] = { .type = NLA_U16, },
[TCP_METRICS_ATTR_FOPEN_SYN_DROPS] = { .type = NLA_U16, },
[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS] = { .type = NLA_MSECS, },
[TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
.len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
struct tcp_metrics_block *tm)
{
struct nlattr *nest;
int i;
switch (tm->tcpm_daddr.family) {
case AF_INET:
if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
goto nla_put_failure;
if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
goto nla_put_failure;
break;
case AF_INET6:
if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
goto nla_put_failure;
if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
goto nla_put_failure;
break;
default:
return -EAFNOSUPPORT;
}
if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
jiffies - READ_ONCE(tm->tcpm_stamp),
TCP_METRICS_ATTR_PAD) < 0)
goto nla_put_failure;
{
int n = 0;
nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
if (!nest)
goto nla_put_failure;
for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
u32 val = tcp_metric_get(tm, i);
if (!val)
continue;
if (i == TCP_METRIC_RTT) {
if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
val) < 0)
goto nla_put_failure;
n++;
val = max(val / 1000, 1U);
}
if (i == TCP_METRIC_RTTVAR) {
if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
val) < 0)
goto nla_put_failure;
n++;
val = max(val / 1000, 1U);
}
if (nla_put_u32(msg, i + 1, val) < 0)
goto nla_put_failure;
n++;
}
if (n)
nla_nest_end(msg, nest);
else
nla_nest_cancel(msg, nest);
}
{
struct tcp_fastopen_metrics tfom_copy[1], *tfom;
unsigned int seq;
do {
seq = read_seqbegin(&fastopen_seqlock);
tfom_copy[0] = tm->tcpm_fastopen;
} while (read_seqretry(&fastopen_seqlock, seq));
tfom = tfom_copy;
if (tfom->mss &&
nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
tfom->mss) < 0)
goto nla_put_failure;
if (tfom->syn_loss &&
(nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
tfom->syn_loss) < 0 ||
nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
jiffies - tfom->last_syn_loss,
TCP_METRICS_ATTR_PAD) < 0))
goto nla_put_failure;
if (tfom->cookie.len > 0 &&
nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
tfom->cookie.len, tfom->cookie.val) < 0)
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int tcp_metrics_dump_info(struct sk_buff *skb,
struct netlink_callback *cb,
struct tcp_metrics_block *tm)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&tcp_metrics_nl_family, NLM_F_MULTI,
TCP_METRICS_CMD_GET);
if (!hdr)
return -EMSGSIZE;
if (tcp_metrics_fill_info(skb, tm) < 0)
goto nla_put_failure;
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int tcp_metrics_nl_dump(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
unsigned int max_rows = 1U << tcp_metrics_hash_log;
unsigned int row, s_row = cb->args[0];
int s_col = cb->args[1], col = s_col;
for (row = s_row; row < max_rows; row++, s_col = 0) {
struct tcp_metrics_block *tm;
struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
rcu_read_lock();
for (col = 0, tm = rcu_dereference(hb->chain); tm;
tm = rcu_dereference(tm->tcpm_next), col++) {
if (!net_eq(tm_net(tm), net))
continue;
if (col < s_col)
continue;
if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
rcu_read_unlock();
goto done;
}
}
rcu_read_unlock();
}
done:
cb->args[0] = row;
cb->args[1] = col;
return skb->len;
}
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
unsigned int *hash, int optional, int v4, int v6)
{
struct nlattr *a;
a = info->attrs[v4];
if (a) {
inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
if (hash)
*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
return 0;
}
a = info->attrs[v6];
if (a) {
struct in6_addr in6;
if (nla_len(a) != sizeof(struct in6_addr))
return -EINVAL;
in6 = nla_get_in6_addr(a);
inetpeer_set_addr_v6(addr, &in6);
if (hash)
*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
return 0;
}
return optional ? 1 : -EAFNOSUPPORT;
}
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
unsigned int *hash, int optional)
{
return __parse_nl_addr(info, addr, hash, optional,
TCP_METRICS_ATTR_ADDR_IPV4,
TCP_METRICS_ATTR_ADDR_IPV6);
}
static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
return __parse_nl_addr(info, addr, NULL, 0,
TCP_METRICS_ATTR_SADDR_IPV4,
TCP_METRICS_ATTR_SADDR_IPV6);
}
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct tcp_metrics_block *tm;
struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct sk_buff *msg;
struct net *net = genl_info_net(info);
void *reply;
int ret;
bool src = true;
ret = parse_nl_addr(info, &daddr, &hash, 0);
if (ret < 0)
return ret;
ret = parse_nl_saddr(info, &saddr);
if (ret < 0)
src = false;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
info->genlhdr->cmd);
if (!reply)
goto nla_put_failure;
hash ^= net_hash_mix(net);
hash = hash_32(hash, tcp_metrics_hash_log);
ret = -ESRCH;
rcu_read_lock();
for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
if (addr_same(&tm->tcpm_daddr, &daddr) &&
(!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
net_eq(tm_net(tm), net)) {
ret = tcp_metrics_fill_info(msg, tm);
break;
}
}
rcu_read_unlock();
if (ret < 0)
goto out_free;
genlmsg_end(msg, reply);
return genlmsg_reply(msg, info);
nla_put_failure:
ret = -EMSGSIZE;
out_free:
nlmsg_free(msg);
return ret;
}
static void tcp_metrics_flush_all(struct net *net)
{
unsigned int max_rows = 1U << tcp_metrics_hash_log;
struct tcpm_hash_bucket *hb = tcp_metrics_hash;
struct tcp_metrics_block *tm;
unsigned int row;
for (row = 0; row < max_rows; row++, hb++) {
struct tcp_metrics_block __rcu **pp;
bool match;
spin_lock_bh(&tcp_metrics_lock);
pp = &hb->chain;
for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
match = net ? net_eq(tm_net(tm), net) :
!refcount_read(&tm_net(tm)->ns.count);
if (match) {
*pp = tm->tcpm_next;
kfree_rcu(tm, rcu_head);
} else {
pp = &tm->tcpm_next;
}
}
spin_unlock_bh(&tcp_metrics_lock);
}
}
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct tcpm_hash_bucket *hb;
struct tcp_metrics_block *tm;
struct tcp_metrics_block __rcu **pp;
struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct net *net = genl_info_net(info);
int ret;
bool src = true, found = false;
ret = parse_nl_addr(info, &daddr, &hash, 1);
if (ret < 0)
return ret;
if (ret > 0) {
tcp_metrics_flush_all(net);
return 0;
}
ret = parse_nl_saddr(info, &saddr);
if (ret < 0)
src = false;
hash ^= net_hash_mix(net);
hash = hash_32(hash, tcp_metrics_hash_log);
hb = tcp_metrics_hash + hash;
pp = &hb->chain;
spin_lock_bh(&tcp_metrics_lock);
for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
if (addr_same(&tm->tcpm_daddr, &daddr) &&
(!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
net_eq(tm_net(tm), net)) {
*pp = tm->tcpm_next;
kfree_rcu(tm, rcu_head);
found = true;
} else {
pp = &tm->tcpm_next;
}
}
spin_unlock_bh(&tcp_metrics_lock);
if (!found)
return -ESRCH;
return 0;
}
static const struct genl_small_ops tcp_metrics_nl_ops[] = {
{
.cmd = TCP_METRICS_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tcp_metrics_nl_cmd_get,
.dumpit = tcp_metrics_nl_dump,
},
{
.cmd = TCP_METRICS_CMD_DEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tcp_metrics_nl_cmd_del,
.flags = GENL_ADMIN_PERM,
},
};
static struct genl_family tcp_metrics_nl_family __ro_after_init = {
.hdrsize = 0,
.name = TCP_METRICS_GENL_NAME,
.version = TCP_METRICS_GENL_VERSION,
.maxattr = TCP_METRICS_ATTR_MAX,
.policy = tcp_metrics_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
.small_ops = tcp_metrics_nl_ops,
.n_small_ops = ARRAY_SIZE(tcp_metrics_nl_ops),
.resv_start_op = TCP_METRICS_CMD_DEL + 1,
};
static unsigned int tcpmhash_entries __initdata;
static int __init set_tcpmhash_entries(char *str)
{
ssize_t ret;
if (!str)
return 0;
ret = kstrtouint(str, 0, &tcpmhash_entries);
if (ret)
return 0;
return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
static void __init tcp_metrics_hash_alloc(void)
{
unsigned int slots = tcpmhash_entries;
size_t size;
if (!slots) {
if (totalram_pages() >= 128 * 1024)
slots = 16 * 1024;
else
slots = 8 * 1024;
}
tcp_metrics_hash_log = order_base_2(slots);
size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
if (!tcp_metrics_hash)
panic("Could not allocate the tcp_metrics hash table\n");
}
static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
tcp_metrics_flush_all(NULL);
}
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
.exit_batch = tcp_net_metrics_exit_batch,
};
void __init tcp_metrics_init(void)
{
int ret;
tcp_metrics_hash_alloc();
ret = register_pernet_subsys(&tcp_net_metrics_ops);
if (ret < 0)
panic("Could not register tcp_net_metrics_ops\n");
ret = genl_register_family(&tcp_metrics_nl_family);
if (ret < 0)
panic("Could not register tcp_metrics generic netlink\n");
}
| linux-master | net/ipv4/tcp_metrics.c |
/* Protective Load Balancing (PLB)
*
* PLB was designed to reduce link load imbalance across datacenter
* switches. PLB is a host-based optimization; it leverages congestion
* signals from the transport layer to randomly change the path of the
* connection experiencing sustained congestion. PLB prefers to repath
* after idle periods to minimize packet reordering. It repaths by
* changing the IPv6 Flow Label on the packets of a connection, which
* datacenter switches include as part of ECMP/WCMP hashing.
*
* PLB is described in detail in:
*
* Mubashir Adnan Qureshi, Yuchung Cheng, Qianwen Yin, Qiaobin Fu,
* Gautam Kumar, Masoud Moshref, Junhua Yan, Van Jacobson,
* David Wetherall, Abdul Kabbani:
* "PLB: Congestion Signals are Simple and Effective for
* Network Load Balancing"
* In ACM SIGCOMM 2022, Amsterdam, Netherlands.
*
*/
#include <net/tcp.h>
/* Called once per round-trip to update PLB state for a connection. */
void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
const int cong_ratio)
{
struct net *net = sock_net(sk);
if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))
return;
if (cong_ratio >= 0) {
if (cong_ratio < READ_ONCE(net->ipv4.sysctl_tcp_plb_cong_thresh))
plb->consec_cong_rounds = 0;
else if (plb->consec_cong_rounds <
READ_ONCE(net->ipv4.sysctl_tcp_plb_rehash_rounds))
plb->consec_cong_rounds++;
}
}
EXPORT_SYMBOL_GPL(tcp_plb_update_state);
/* Check whether recent congestion has been persistent enough to warrant
* a load balancing decision that switches the connection to another path.
*/
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb)
{
struct net *net = sock_net(sk);
u32 max_suspend;
bool forced_rehash = false, idle_rehash = false;
if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))
return;
forced_rehash = plb->consec_cong_rounds >=
READ_ONCE(net->ipv4.sysctl_tcp_plb_rehash_rounds);
/* If sender goes idle then we check whether to rehash. */
idle_rehash = READ_ONCE(net->ipv4.sysctl_tcp_plb_idle_rehash_rounds) &&
!tcp_sk(sk)->packets_out &&
plb->consec_cong_rounds >=
READ_ONCE(net->ipv4.sysctl_tcp_plb_idle_rehash_rounds);
if (!forced_rehash && !idle_rehash)
return;
/* Note that tcp_jiffies32 can wrap; we detect wraps by checking for
* cases where the max suspension end is before the actual suspension
* end. We clear pause_until to 0 to indicate there is no recent
* RTO event that constrains PLB rehashing.
*/
max_suspend = 2 * READ_ONCE(net->ipv4.sysctl_tcp_plb_suspend_rto_sec) * HZ;
if (plb->pause_until &&
(!before(tcp_jiffies32, plb->pause_until) ||
before(tcp_jiffies32 + max_suspend, plb->pause_until)))
plb->pause_until = 0;
if (plb->pause_until)
return;
sk_rethink_txhash(sk);
plb->consec_cong_rounds = 0;
tcp_sk(sk)->plb_rehash++;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPLBREHASH);
}
EXPORT_SYMBOL_GPL(tcp_plb_check_rehash);
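/* Illustrative, standalone userspace sketch (not part of the original file):
 * the repath decision above fires after enough consecutive congested rounds,
 * or earlier when the connection is idle; the RTO-suspension check is omitted
 * here. The two thresholds are hard-coded assumptions meant to mirror the
 * sysctl defaults, not values read from a live system.
 */
#include <stdbool.h>
#include <stdio.h>
#define REHASH_ROUNDS		12	/* assumed tcp_plb_rehash_rounds */
#define IDLE_REHASH_ROUNDS	3	/* assumed tcp_plb_idle_rehash_rounds */
static bool plb_should_repath(unsigned int consec_cong_rounds, bool idle)
{
	if (consec_cong_rounds >= REHASH_ROUNDS)
		return true;				/* forced rehash */
	return idle && consec_cong_rounds >= IDLE_REHASH_ROUNDS;
}
int main(void)
{
	printf("%d %d %d\n",
	       plb_should_repath(12, false),	/* 1: sustained congestion */
	       plb_should_repath(4, true),	/* 1: idle with some congestion */
	       plb_should_repath(4, false));	/* 0: keep the current path */
	return 0;
}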
/* Upon RTO, disallow load balancing for a while, to avoid having load
* balancing decisions switch traffic to a black-holed path that was
* previously avoided with a sk_rethink_txhash() call at RTO time.
*/
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb)
{
struct net *net = sock_net(sk);
u32 pause;
if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))
return;
pause = READ_ONCE(net->ipv4.sysctl_tcp_plb_suspend_rto_sec) * HZ;
pause += get_random_u32_below(pause);
plb->pause_until = tcp_jiffies32 + pause;
/* Reset PLB state upon RTO, since an RTO causes a sk_rethink_txhash() call
* that may switch this connection to a path with completely different
* congestion characteristics.
*/
plb->consec_cong_rounds = 0;
}
EXPORT_SYMBOL_GPL(tcp_plb_update_state_upon_rto);
| linux-master | net/ipv4/tcp_plb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The Internet Protocol (IP) output module.
*
* Authors: Ross Biro
* Fred N. van Kempen, <[email protected]>
* Donald Becker, <[email protected]>
* Alan Cox, <[email protected]>
* Richard Underwood
* Stefan Becker, <[email protected]>
* Jorge Cwik, <[email protected]>
* Arnt Gulbrandsen, <[email protected]>
* Hirokazu Takahashi, <[email protected]>
*
* See ip_input.c for original log
*
* Fixes:
* Alan Cox : Missing nonblock feature in ip_build_xmit.
* Mike Kilburn : htons() missing in ip_build_xmit.
* Bradford Johnson: Fix faulty handling of some frames when
* no route is found.
* Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
* (in case the packet is not accepted by
* output firewall rules)
* Mike McLagan : Routing by source
* Alexey Kuznetsov: use new route cache
* Andi Kleen: Fix broken PMTU recovery and remove
* some redundant tests.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Andi Kleen : Replace ip_reply with ip_send_reply.
* Andi Kleen : Split fast and slow ip_build_xmit path
* for decreased register pressure on x86
* and more readability.
* Marc Boucher : When call_out_firewall returns FW_QUEUE,
* silently drop skb instead of failing with -EPERM.
* Detlev Wengorz : Copy protocol for fragments.
* Hirokazu Takahashi: HW checksumming for outgoing UDP
* datagrams.
* Hirokazu Takahashi: sendfile() on UDP works now.
*/
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
unsigned int mtu,
int (*output)(struct net *, struct sock *, struct sk_buff *));
/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
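/* Illustrative, standalone userspace sketch (not part of the original file):
 * ip_fast_csum() is an arch-optimized RFC 1071 Internet checksum over the IP
 * header. A portable equivalent sums 16-bit words, folds the carries back in
 * and returns the one's complement, as below; the sample header bytes are an
 * arbitrary assumption for the printout.
 */
#include <stdint.h>
#include <stdio.h>
static uint16_t ip_header_csum(const uint8_t *hdr, unsigned int ihl)
{
	uint32_t sum = 0;
	unsigned int i;
	for (i = 0; i < ihl * 4; i += 2)	/* ihl counts 32-bit words */
		sum += (uint32_t)hdr[i] << 8 | hdr[i + 1];
	while (sum >> 16)			/* end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
int main(void)
{
	uint8_t iph[20] = { 0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
			    0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
			    0xc0, 0xa8, 0x00, 0x02 };	/* checksum field zeroed */
	printf("header checksum: 0x%04x\n", ip_header_csum(iph, 5));
	return 0;
}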
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
iph_set_totlen(iph, skb->len);
ip_send_check(iph);
/* if egress device is enslaved to an L3 master device pass the
* skb to its handler for processing
*/
skb = l3mdev_ip_out(sk, skb);
if (unlikely(!skb))
return 0;
skb->protocol = htons(ETH_P_IP);
return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, skb_dst(skb)->dev,
dst_output);
}
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
int err;
err = __ip_local_out(net, sk, skb);
if (likely(err == 1))
err = dst_output(net, sk, skb);
return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
static inline int ip_select_ttl(const struct inet_sock *inet,
const struct dst_entry *dst)
{
int ttl = READ_ONCE(inet->uc_ttl);
if (ttl < 0)
ttl = ip4_dst_hoplimit(dst);
return ttl;
}
/*
* Add an ip header to a skbuff and send it out.
*
*/
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
u8 tos)
{
const struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = skb_rtable(skb);
struct net *net = sock_net(sk);
struct iphdr *iph;
/* Build the IP header. */
skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = tos;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr;
iph->protocol = sk->sk_protocol;
/* Do not bother generating IPID for small packets (eg SYNACK) */
if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
iph->frag_off = htons(IP_DF);
iph->id = 0;
} else {
iph->frag_off = 0;
/* TCP packets here are SYNACK with fat IPv4/TCP options.
* Avoid using the hashed IP ident generator.
*/
if (sk->sk_protocol == IPPROTO_TCP)
iph->id = (__force __be16)get_random_u16();
else
__ip_select_ident(net, iph, 1);
}
if (opt && opt->opt.optlen) {
iph->ihl += opt->opt.optlen>>2;
ip_options_build(skb, &opt->opt, daddr, rt);
}
skb->priority = READ_ONCE(sk->sk_priority);
if (!skb->mark)
skb->mark = READ_ONCE(sk->sk_mark);
/* Send it out. */
return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = (struct rtable *)dst;
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
bool is_v6gw = false;
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
} else if (rt->rt_type == RTN_BROADCAST)
IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
/* OUTOCTETS should be counted after fragment */
IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
skb = skb_expand_head(skb, hh_len);
if (!skb)
return -ENOMEM;
}
if (lwtunnel_xmit_redirect(dst->lwtstate)) {
int res = lwtunnel_xmit(skb);
if (res != LWTUNNEL_XMIT_CONTINUE)
return res;
}
rcu_read_lock();
neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
if (!IS_ERR(neigh)) {
int res;
sock_confirm_neigh(skb, neigh);
/* if crossing protocols, can not use the cached header */
res = neigh_output(neigh, skb, is_v6gw);
rcu_read_unlock();
return res;
}
rcu_read_unlock();
net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
__func__);
kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
return PTR_ERR(neigh);
}
static int ip_finish_output_gso(struct net *net, struct sock *sk,
struct sk_buff *skb, unsigned int mtu)
{
struct sk_buff *segs, *nskb;
netdev_features_t features;
int ret = 0;
/* common case: seglen is <= mtu
*/
if (skb_gso_validate_network_len(skb, mtu))
return ip_finish_output2(net, sk, skb);
/* Slowpath - GSO segment length exceeds the egress MTU.
*
* This can happen in several cases:
* - Forwarding of a TCP GRO skb, when DF flag is not set.
* - Forwarding of an skb that arrived on a virtualization interface
* (virtio-net/vhost/tap) with TSO/GSO size set by other network
* stack.
* - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
* interface with a smaller MTU.
* - Arriving GRO skb (or GSO skb in a virtualized environment) that is
* bridged to a NETIF_F_TSO tunnel stacked over an interface with an
* insufficient MTU.
*/
features = netif_skb_features(skb);
BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) {
kfree_skb(skb);
return -ENOMEM;
}
consume_skb(skb);
skb_list_walk_safe(segs, segs, nskb) {
int err;
skb_mark_not_on_list(segs);
err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
if (err && ret == 0)
ret = err;
}
return ret;
}
static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
unsigned int mtu;
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm) {
IPCB(skb)->flags |= IPSKB_REROUTED;
return dst_output(net, sk, skb);
}
#endif
mtu = ip_skb_dst_mtu(sk, skb);
if (skb_is_gso(skb))
return ip_finish_output_gso(net, sk, skb, mtu);
if (skb->len > mtu || IPCB(skb)->frag_max_size)
return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
return ip_finish_output2(net, sk, skb);
}
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
int ret;
ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
switch (ret) {
case NET_XMIT_SUCCESS:
return __ip_finish_output(net, sk, skb);
case NET_XMIT_CN:
return __ip_finish_output(net, sk, skb) ? : ret;
default:
kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
return ret;
}
}
static int ip_mc_finish_output(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
struct rtable *new_rt;
bool do_cn = false;
int ret, err;
ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
switch (ret) {
case NET_XMIT_CN:
do_cn = true;
fallthrough;
case NET_XMIT_SUCCESS:
break;
default:
kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
return ret;
}
/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
* this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
* see ipv4_pktinfo_prepare().
*/
new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
if (new_rt) {
new_rt->rt_iif = 0;
skb_dst_drop(skb);
skb_dst_set(skb, &new_rt->dst);
}
err = dev_loopback_xmit(net, sk, skb);
return (do_cn && err) ? ret : err;
}
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct net_device *dev = rt->dst.dev;
/*
* If the indicated interface is up and running, send the packet.
*/
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
/*
* Multicasts are looped back for other local users
*/
if (rt->rt_flags&RTCF_MULTICAST) {
if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
/* Small optimization: do not loop back non-local frames
that returned after forwarding; they will be dropped
by ip_mr_input in any case.
Note that local frames are looped back to be delivered
to local recipients.
This check is duplicated in ip_mr_input at the moment.
*/
&&
((rt->rt_flags & RTCF_LOCAL) ||
!(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
net, sk, newskb, NULL, newskb->dev,
ip_mc_finish_output);
}
/* Multicasts with ttl 0 must not go beyond the host */
if (ip_hdr(skb)->ttl == 0) {
kfree_skb(skb);
return 0;
}
}
if (rt->rt_flags&RTCF_BROADCAST) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
net, sk, newskb, NULL, newskb->dev,
ip_mc_finish_output);
}
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
net, sk, skb, NULL, skb->dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
net, sk, skb, indev, dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
EXPORT_SYMBOL(ip_output);
/*
* copy saddr and daddr, possibly using 64bit load/stores
* Equivalent to :
* iph->saddr = fl4->saddr;
* iph->daddr = fl4->daddr;
*/
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
iph->saddr = fl4->saddr;
iph->daddr = fl4->daddr;
}
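/* Illustrative, standalone userspace sketch (not part of the original file):
 * the BUILD_BUG_ON above pins the layout assumption that daddr immediately
 * follows saddr, so both can be copied with a single 64-bit access. In plain
 * C11 the same guard can be written with offsetof() and _Static_assert; the
 * struct and field names here are invented for the demo.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
struct demo_flow {
	uint32_t saddr;
	uint32_t daddr;
};
_Static_assert(offsetof(struct demo_flow, daddr) ==
	       offsetof(struct demo_flow, saddr) + sizeof(uint32_t),
	       "saddr and daddr must be adjacent for a single 64-bit copy");
int main(void)
{
	struct demo_flow fl = { .saddr = 0x0a000001, .daddr = 0x0a000002 };
	uint64_t both;
	memcpy(&both, &fl.saddr, sizeof(both));	/* one 8-byte move covers both fields */
	printf("combined: %016llx\n", (unsigned long long)both);
	return 0;
}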
/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
__u8 tos)
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options_rcu *inet_opt;
struct flowi4 *fl4;
struct rtable *rt;
struct iphdr *iph;
int res;
/* Skip all of this if the packet is already routed,
* f.e. by something like SCTP.
*/
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
fl4 = &fl->u.ip4;
rt = skb_rtable(skb);
if (rt)
goto packet_routed;
/* Make sure we can route this packet. */
rt = (struct rtable *)__sk_dst_check(sk, 0);
if (!rt) {
__be32 daddr;
/* Use correct destination address if we have options. */
daddr = inet->inet_daddr;
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
/* If this fails, the retransmit mechanism of the transport layer
* will keep trying until a route appears or the connection times
* itself out.
*/
rt = ip_route_output_ports(net, fl4, sk,
daddr, inet->inet_saddr,
inet->inet_dport,
inet->inet_sport,
sk->sk_protocol,
RT_CONN_FLAGS_TOS(sk, tos),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
sk_setup_caps(sk, &rt->dst);
}
skb_dst_set_noref(skb, &rt->dst);
packet_routed:
if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
goto no_route;
/* OK, we know where to send it, allocate and build IP header. */
skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->protocol = sk->sk_protocol;
ip_copy_addrs(iph, fl4);
/* Transport layer set skb->h.foo itself. */
if (inet_opt && inet_opt->opt.optlen) {
iph->ihl += inet_opt->opt.optlen >> 2;
ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
}
ip_select_ident_segs(net, skb, sk,
skb_shinfo(skb)->gso_segs ?: 1);
/* TODO : should we use skb->sk here instead of sk ? */
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = READ_ONCE(sk->sk_mark);
res = ip_local_out(net, sk, skb);
rcu_read_unlock();
return res;
no_route:
rcu_read_unlock();
IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}
EXPORT_SYMBOL(ip_queue_xmit);
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
to->skb_iif = from->skb_iif;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
to->mark = from->mark;
skb_copy_hash(to, from);
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
to->ipvs_property = from->ipvs_property;
#endif
skb_copy_secmark(to, from);
}
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
unsigned int mtu,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
struct iphdr *iph = ip_hdr(skb);
if ((iph->frag_off & htons(IP_DF)) == 0)
return ip_do_fragment(net, sk, skb, output);
if (unlikely(!skb->ignore_df ||
(IPCB(skb)->frag_max_size &&
IPCB(skb)->frag_max_size > mtu))) {
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
kfree_skb(skb);
return -EMSGSIZE;
}
return ip_do_fragment(net, sk, skb, output);
}
void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
unsigned int hlen, struct ip_fraglist_iter *iter)
{
unsigned int first_len = skb_pagelen(skb);
iter->frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
iter->offset = 0;
iter->iph = iph;
iter->hlen = hlen;
skb->data_len = first_len - skb_headlen(skb);
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
unsigned int hlen = iter->hlen;
struct iphdr *iph = iter->iph;
struct sk_buff *frag;
frag = iter->frag;
frag->ip_summed = CHECKSUM_NONE;
skb_reset_transport_header(frag);
__skb_push(frag, hlen);
skb_reset_network_header(frag);
memcpy(skb_network_header(frag), iph, hlen);
iter->iph = ip_hdr(frag);
iph = iter->iph;
iph->tot_len = htons(frag->len);
ip_copy_metadata(frag, skb);
iter->offset += skb->len - hlen;
iph->frag_off = htons(iter->offset >> 3);
if (frag->next)
iph->frag_off |= htons(IP_MF);
/* Ready, complete checksum */
ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);
void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
unsigned int ll_rs, unsigned int mtu, bool DF,
struct ip_frag_state *state)
{
struct iphdr *iph = ip_hdr(skb);
state->DF = DF;
state->hlen = hlen;
state->ll_rs = ll_rs;
state->mtu = mtu;
state->left = skb->len - hlen; /* Space per frame */
state->ptr = hlen; /* Where to start from */
state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);
static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
bool first_frag)
{
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
/* ANK: dirty, but effective trick. Upgrade options only if
* the segment to be fragmented was THE FIRST (otherwise,
* options are already fixed) and make it ONCE
* on the initial skb, so that all the following fragments
* will inherit fixed options.
*/
if (first_frag)
ip_options_fragment(from);
}
struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
unsigned int len = state->left;
struct sk_buff *skb2;
struct iphdr *iph;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > state->mtu)
len = state->mtu;
/* IF: we are not sending up to and including the packet end
then align the next start on an eight byte boundary */
if (len < state->left) {
len &= ~7;
}
/* Allocate buffer */
skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
if (!skb2)
return ERR_PTR(-ENOMEM);
/*
* Set up data on packet
*/
ip_copy_metadata(skb2, skb);
skb_reserve(skb2, state->ll_rs);
skb_put(skb2, len + state->hlen);
skb_reset_network_header(skb2);
skb2->transport_header = skb2->network_header + state->hlen;
/*
* Charge the memory for the fragment to any owner
* it might possess
*/
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
/*
* Copy the packet header into the new buffer.
*/
skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
/*
* Copy a block of the IP datagram.
*/
if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
BUG();
state->left -= len;
/*
* Fill in the new header fields.
*/
iph = ip_hdr(skb2);
iph->frag_off = htons((state->offset >> 3));
if (state->DF)
iph->frag_off |= htons(IP_DF);
/*
* Added AC : If we are fragmenting a fragment that's not the
* last fragment then keep the MF bit set on each fragment
*/
if (state->left > 0 || state->not_last_frag)
iph->frag_off |= htons(IP_MF);
state->ptr += len;
state->offset += len;
iph->tot_len = htons(len + state->hlen);
ip_send_check(iph);
return skb2;
}
EXPORT_SYMBOL(ip_frag_next);
/*
* This IP datagram is too large to be sent in one piece. Break it up into
* smaller pieces (each of size equal to IP header plus
* a block of the data of the original IP data part) that will yet fit in a
* single device frame, and queue such a frame for sending.
*/
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
struct iphdr *iph;
struct sk_buff *skb2;
bool mono_delivery_time = skb->mono_delivery_time;
struct rtable *rt = skb_rtable(skb);
unsigned int mtu, hlen, ll_rs;
struct ip_fraglist_iter iter;
ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
int err = 0;
/* for offloaded checksums cleanup checksum before fragmentation */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_checksum_help(skb)))
goto fail;
/*
* Point into the IP datagram header.
*/
iph = ip_hdr(skb);
mtu = ip_skb_dst_mtu(sk, skb);
if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
mtu = IPCB(skb)->frag_max_size;
/*
* Setup starting values.
*/
hlen = iph->ihl * 4;
mtu = mtu - hlen; /* Size of data space */
IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
/* When frag_list is given, use it. First, check its validity:
* some transformers could create a wrong frag_list or break an existing
* one (this is not prohibited). In this case fall back to copying.
*
* LATER: this step can be merged to real generation of fragments,
* we can switch to copy when see the first bad fragment.
*/
if (skb_has_frag_list(skb)) {
struct sk_buff *frag, *frag2;
unsigned int first_len = skb_pagelen(skb);
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
ip_is_fragment(iph) ||
skb_cloned(skb) ||
skb_headroom(skb) < ll_rs)
goto slow_path;
skb_walk_frags(skb, frag) {
/* Correct geometry. */
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen + ll_rs)
goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
ip_fraglist_init(skb, iph, hlen, &iter);
for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
if (iter.frag) {
bool first_frag = (iter.offset == 0);
IPCB(iter.frag)->flags = IPCB(skb)->flags;
ip_fraglist_prepare(skb, &iter);
if (first_frag && IPCB(skb)->opt.optlen) {
/* ipcb->opt is not populated for frags
* coming from __ip_make_skb(),
* ip_options_fragment() needs optlen
*/
IPCB(iter.frag)->opt.optlen =
IPCB(skb)->opt.optlen;
ip_options_fragment(iter.frag);
ip_send_check(iter.iph);
}
}
skb_set_delivery_time(skb, tstamp, mono_delivery_time);
err = output(net, sk, skb);
if (!err)
IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
if (err || !iter.frag)
break;
skb = ip_fraglist_next(&iter);
}
if (err == 0) {
IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
return 0;
}
kfree_skb_list(iter.frag);
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
return err;
slow_path_clean:
skb_walk_frags(skb, frag2) {
if (frag2 == frag)
break;
frag2->sk = NULL;
frag2->destructor = NULL;
skb->truesize += frag2->truesize;
}
}
slow_path:
/*
* Fragment the datagram.
*/
ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
&state);
/*
* Keep copying data until we run out.
*/
while (state.left > 0) {
bool first_frag = (state.offset == 0);
skb2 = ip_frag_next(skb, &state);
if (IS_ERR(skb2)) {
err = PTR_ERR(skb2);
goto fail;
}
ip_frag_ipcb(skb, skb2, first_frag);
/*
* Put this fragment into the sending queue.
*/
skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
err = output(net, sk, skb2);
if (err)
goto fail;
IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
}
consume_skb(skb);
IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
return err;
fail:
kfree_skb(skb);
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
return err;
}
EXPORT_SYMBOL(ip_do_fragment);
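/* Illustrative, standalone userspace sketch (not part of the original file):
 * the slow path above slices the payload so that every fragment but the last
 * carries a multiple of 8 data bytes, encodes the offset in 8-byte units and
 * sets MF on all but the final piece. The header length, MTU and payload size
 * are arbitrary assumptions for the printout.
 */
#include <stdio.h>
int main(void)
{
	unsigned int hlen = 20, mtu = 1500, payload = 4000;
	unsigned int data_space = mtu - hlen;	/* data room per fragment */
	unsigned int off = 0;
	while (off < payload) {
		unsigned int len = payload - off;
		int mf;
		if (len > data_space)
			len = data_space & ~7U;	/* keep later offsets 8-byte aligned */
		mf = (off + len) < payload;
		printf("offset=%u (field value %u) total_len=%u MF=%d\n",
		       off, off / 8, len + hlen, mf);
		off += len;
	}
	return 0;
}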
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
struct msghdr *msg = from;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (!copy_from_iter_full(to, len, &msg->msg_iter))
return -EFAULT;
} else {
__wsum csum = 0;
if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, odd);
}
return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
static int __ip_append_data(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork,
struct page_frag *pfrag,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct ubuf_info *uarg = NULL;
struct sk_buff *skb;
struct ip_options *opt = cork->opt;
int hh_len;
int exthdrlen;
int mtu;
int copy;
int err;
int offset = 0;
bool zc = false;
unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
unsigned int wmem_alloc_delta = 0;
bool paged, extra_uref = false;
u32 tskey = 0;
skb = skb_peek_tail(queue);
exthdrlen = !skb ? rt->dst.header_len : 0;
mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
paged = !!cork->gso_size;
if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
tskey = atomic_inc_return(&sk->sk_tskey) - 1;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
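/* Worked example of the maxfraglen calculation above (illustrative figures):
 * with a 1500-byte MTU and no IP options, fragheaderlen = 20 and
 * maxfraglen = ((1500 - 20) & ~7) + 20 = 1500; with a 1006-byte MTU it is
 * ((1006 - 20) & ~7) + 20 = 1004, so each non-final fragment carries a
 * payload that is a multiple of 8 bytes.
 */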
if (cork->length + length > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
mtu - (opt ? opt->optlen : 0));
return -EMSGSIZE;
}
/*
* transhdrlen > 0 means that this is the first fragment and we want
* it not to be fragmented later.
*/
if (transhdrlen &&
length + fragheaderlen <= mtu &&
rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
(!(flags & MSG_MORE) || cork->gso_size) &&
(!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
csummode = CHECKSUM_PARTIAL;
if ((flags & MSG_ZEROCOPY) && length) {
struct msghdr *msg = from;
if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
return -EINVAL;
/* Leave uarg NULL if can't zerocopy, callers should
* be able to handle it.
*/
if ((rt->dst.dev->features & NETIF_F_SG) &&
csummode == CHECKSUM_PARTIAL) {
paged = true;
zc = true;
uarg = msg->msg_ubuf;
}
} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
if (!uarg)
return -ENOBUFS;
extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
if (rt->dst.dev->features & NETIF_F_SG &&
csummode == CHECKSUM_PARTIAL) {
paged = true;
zc = true;
} else {
uarg_to_msgzc(uarg)->zerocopy = 0;
skb_zcopy_set(skb, uarg, &extra_uref);
}
}
} else if ((flags & MSG_SPLICE_PAGES) && length) {
if (inet_test_bit(HDRINCL, sk))
return -EPERM;
if (rt->dst.dev->features & NETIF_F_SG &&
getfrag == ip_generic_getfrag)
/* We need an empty buffer to attach stuff to */
paged = true;
else
flags &= ~MSG_SPLICE_PAGES;
}
cork->length += length;
/* So, what's going on in the loop below?
*
* We use calculated fragment length to generate chained skb,
* each of segments is IP fragment ready for sending to network after
* adding appropriate IP header.
*/
if (!skb)
goto alloc_new_skb;
while (length > 0) {
/* Check if the remaining data fits into current packet. */
copy = mtu - skb->len;
if (copy < length)
copy = maxfraglen - skb->len;
if (copy <= 0) {
char *data;
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen, alloc_extra;
unsigned int pagedlen;
struct sk_buff *skb_prev;
alloc_new_skb:
skb_prev = skb;
if (skb_prev)
fraggap = skb_prev->len - maxfraglen;
else
fraggap = 0;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
if (datalen > mtu - fragheaderlen)
datalen = maxfraglen - fragheaderlen;
fraglen = datalen + fragheaderlen;
pagedlen = 0;
alloc_extra = hh_len + 15;
alloc_extra += exthdrlen;
/* The last fragment gets additional space at tail.
* Note, with MSG_MORE we overallocate on fragments,
* because we have no idea what fragment will be
* the last.
*/
if (datalen == length + fraggap)
alloc_extra += rt->dst.trailer_len;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else if (!paged &&
(fraglen + alloc_extra < SKB_MAX_ALLOC ||
!(rt->dst.dev->features & NETIF_F_SG)))
alloclen = fraglen;
else {
alloclen = fragheaderlen + transhdrlen;
pagedlen = datalen - transhdrlen;
}
alloclen += alloc_extra;
if (transhdrlen) {
skb = sock_alloc_send_skb(sk, alloclen,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
2 * sk->sk_sndbuf)
skb = alloc_skb(alloclen,
sk->sk_allocation);
if (unlikely(!skb))
err = -ENOBUFS;
}
if (!skb)
goto error;
/*
* Fill in the control structures
*/
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
/*
* Find where to start putting bytes.
*/
data = skb_put(skb, fraglen + exthdrlen - pagedlen);
skb_set_network_header(skb, exthdrlen);
skb->transport_header = (skb->network_header +
fragheaderlen);
data += fragheaderlen + exthdrlen;
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap - pagedlen;
/* [!] NOTE: copy will be negative if pagedlen>0
* because then the equation reduces to -fraggap.
*/
if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
} else if (flags & MSG_SPLICE_PAGES) {
copy = 0;
}
offset += copy;
length -= copy + transhdrlen;
transhdrlen = 0;
exthdrlen = 0;
csummode = CHECKSUM_NONE;
/* only the initial fragment is time stamped */
skb_shinfo(skb)->tx_flags = cork->tx_flags;
cork->tx_flags = 0;
skb_shinfo(skb)->tskey = tskey;
tskey = 0;
skb_zcopy_set(skb, uarg, &extra_uref);
if ((flags & MSG_CONFIRM) && !skb_prev)
skb_set_dst_pending_confirm(skb, 1);
/*
* Put the packet on the pending queue.
*/
if (!skb->destructor) {
skb->destructor = sock_wfree;
skb->sk = sk;
wmem_alloc_delta += skb->truesize;
}
__skb_queue_tail(queue, skb);
continue;
}
if (copy > length)
copy = length;
if (!(rt->dst.dev->features&NETIF_F_SG) &&
skb_tailroom(skb) >= copy) {
unsigned int off;
off = skb->len;
if (getfrag(from, skb_put(skb, copy),
offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
}
} else if (flags & MSG_SPLICE_PAGES) {
struct msghdr *msg = from;
err = -EIO;
if (WARN_ON_ONCE(copy > msg->msg_iter.count))
goto error;
err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
sk->sk_allocation);
if (err < 0)
goto error;
copy = err;
wmem_alloc_delta += copy;
} else if (!zc) {
int i = skb_shinfo(skb)->nr_frags;
err = -ENOMEM;
if (!sk_page_frag_refill(sk, pfrag))
goto error;
skb_zcopy_downgrade_managed(skb);
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
err = -EMSGSIZE;
if (i == MAX_SKB_FRAGS)
goto error;
__skb_fill_page_desc(skb, i, pfrag->page,
pfrag->offset, 0);
skb_shinfo(skb)->nr_frags = ++i;
get_page(pfrag->page);
}
copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (getfrag(from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
goto error_efault;
pfrag->offset += copy;
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb_len_add(skb, copy);
wmem_alloc_delta += copy;
} else {
err = skb_zerocopy_iter_dgram(skb, from, copy);
if (err < 0)
goto error;
}
offset += copy;
length -= copy;
}
if (wmem_alloc_delta)
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
return 0;
error_efault:
err = -EFAULT;
error:
net_zcopy_put_abort(uarg, extra_uref);
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
return err;
}
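/* Initialise the cork for a new corking cycle: duplicate any IP options,
 * record the per-socket MTU, GSO size and flow metadata, and take
 * ownership of the route in *rtp.
 */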
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
struct ipcm_cookie *ipc, struct rtable **rtp)
{
struct ip_options_rcu *opt;
struct rtable *rt;
rt = *rtp;
if (unlikely(!rt))
return -EFAULT;
/*
* setup for corking.
*/
opt = ipc->opt;
if (opt) {
if (!cork->opt) {
cork->opt = kmalloc(sizeof(struct ip_options) + 40,
sk->sk_allocation);
if (unlikely(!cork->opt))
return -ENOBUFS;
}
memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
cork->flags |= IPCORK_OPT;
cork->addr = ipc->addr;
}
cork->fragsize = ip_sk_use_pmtu(sk) ?
dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
if (!inetdev_valid_mtu(cork->fragsize))
return -ENETUNREACH;
cork->gso_size = ipc->gso_size;
cork->dst = &rt->dst;
/* We stole this route, caller should not release it. */
*rtp = NULL;
cork->length = 0;
cork->ttl = ipc->ttl;
cork->tos = ipc->tos;
cork->mark = ipc->sockc.mark;
cork->priority = ipc->priority;
cork->transmit_time = ipc->sockc.transmit_time;
cork->tx_flags = 0;
sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
return 0;
}
/*
* ip_append_data() can make one large IP datagram from many pieces of
* data. Each piece will be held on the socket until
* ip_push_pending_frames() is called. Each piece can be a page or
* non-page data.
*
 * Not only UDP; other transport protocols - e.g. raw sockets - can
 * potentially use this interface.
*
* LATER: length must be adjusted by pad at tail, when it is required.
*/
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
int err;
if (flags&MSG_PROBE)
return 0;
if (skb_queue_empty(&sk->sk_write_queue)) {
err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
if (err)
return err;
} else {
transhdrlen = 0;
}
return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
sk_page_frag(sk), getfrag,
from, length, transhdrlen, flags);
}
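/* Release the resources held by a cork: the copied IP options and the
 * route reference.
 */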
static void ip_cork_release(struct inet_cork *cork)
{
cork->flags &= ~IPCORK_OPT;
kfree(cork->opt);
cork->opt = NULL;
dst_release(cork->dst);
cork->dst = NULL;
}
/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
*/
struct sk_buff *__ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork)
{
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options *opt = NULL;
struct rtable *rt = (struct rtable *)cork->dst;
struct iphdr *iph;
__be16 df = 0;
__u8 ttl;
skb = __skb_dequeue(queue);
if (!skb)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb_network_header(skb))
__skb_pull(skb, skb_network_offset(skb));
while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
__skb_pull(tmp_skb, skb_network_header_len(skb));
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
skb->len += tmp_skb->len;
skb->data_len += tmp_skb->len;
skb->truesize += tmp_skb->truesize;
tmp_skb->destructor = NULL;
tmp_skb->sk = NULL;
}
	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting the frame generated here. No matter how
	 * transforms change the size of the packet, it will come out.
*/
skb->ignore_df = ip_sk_ignore_df(sk);
	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is also set, we still allow this frame to be
	 * fragmented locally. */
if (inet->pmtudisc == IP_PMTUDISC_DO ||
inet->pmtudisc == IP_PMTUDISC_PROBE ||
(skb->len <= dst_mtu(&rt->dst) &&
ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
if (cork->ttl != 0)
ttl = cork->ttl;
else if (rt->rt_type == RTN_MULTICAST)
ttl = inet->mc_ttl;
else
ttl = ip_select_ttl(inet, &rt->dst);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
iph->frag_off = df;
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
ip_copy_addrs(iph, fl4);
ip_select_ident(net, skb, sk);
if (opt) {
iph->ihl += opt->optlen >> 2;
ip_options_build(skb, opt, cork->addr, rt);
}
	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
skb->mark = cork->mark;
skb->tstamp = cork->transmit_time;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
*/
cork->dst = NULL;
skb_dst_set(skb, &rt->dst);
if (iph->protocol == IPPROTO_ICMP) {
u8 icmp_type;
		/* For such sockets, transhdrlen is zero when ip_append_data() is
		 * called, so the icmphdr is not in the skb linear region and the
		 * icmp_type cannot be read via icmp_hdr(skb)->type.
*/
if (sk->sk_type == SOCK_RAW &&
!inet_test_bit(HDRINCL, sk))
icmp_type = fl4->fl4_icmp_type;
else
icmp_type = icmp_hdr(skb)->type;
icmp_out_count(net, icmp_type);
}
ip_cork_release(cork);
out:
return skb;
}
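/* Hand a fully built datagram to the IP output path, translating a
 * positive congestion-notification return into an errno and counting
 * any failure as an output discard.
 */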
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
int err;
err = ip_local_out(net, skb->sk, skb);
if (err) {
if (err > 0)
err = net_xmit_errno(err);
if (err)
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
}
return err;
}
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
struct sk_buff *skb;
skb = ip_finish_skb(sk, fl4);
if (!skb)
return 0;
	/* Netfilter gets the whole, not yet fragmented skb. */
return ip_send_skb(sock_net(sk), skb);
}
/*
* Throw away all pending data on the socket.
*/
static void __ip_flush_pending_frames(struct sock *sk,
struct sk_buff_head *queue,
struct inet_cork *cork)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(queue)) != NULL)
kfree_skb(skb);
ip_cork_release(cork);
}
void ip_flush_pending_frames(struct sock *sk)
{
__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
struct sk_buff *ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags)
{
struct sk_buff_head queue;
int err;
if (flags & MSG_PROBE)
return NULL;
__skb_queue_head_init(&queue);
cork->flags = 0;
cork->addr = 0;
cork->opt = NULL;
err = ip_setup_cork(sk, cork, ipc, rtp);
if (err)
return ERR_PTR(err);
err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
from, length, transhdrlen, flags);
if (err) {
__ip_flush_pending_frames(sk, &queue, cork);
return ERR_PTR(err);
}
return __ip_make_skb(sk, fl4, &queue, cork);
}
/*
* Fetch data from kernel space and fill in checksum if needed.
*/
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
int len, int odd, struct sk_buff *skb)
{
__wsum csum;
csum = csum_partial_copy_nocheck(dptr+offset, to, len);
skb->csum = csum_block_add(skb->csum, csum, odd);
return 0;
}
/*
* Generic function to send a packet as reply to another packet.
* Used to send some TCP resets/acks so far.
*/
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
unsigned int len, u64 transmit_time, u32 txhash)
{
struct ip_options_data replyopts;
struct ipcm_cookie ipc;
struct flowi4 fl4;
struct rtable *rt = skb_rtable(skb);
struct net *net = sock_net(sk);
struct sk_buff *nskb;
int err;
int oif;
if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
return;
ipcm_init(&ipc);
ipc.addr = daddr;
ipc.sockc.transmit_time = transmit_time;
if (replyopts.opt.opt.optlen) {
ipc.opt = &replyopts.opt;
if (replyopts.opt.opt.srr)
daddr = replyopts.opt.opt.faddr;
}
oif = arg->bound_dev_if;
if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
oif = skb->skb_iif;
flowi4_init_output(&fl4, oif,
IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
RT_TOS(arg->tos),
RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
ip_reply_arg_flowi_flags(arg),
daddr, saddr,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
arg->uid);
security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt))
return;
inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
ipc.sockc.mark = fl4.flowi4_mark;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
if (unlikely(err)) {
ip_flush_pending_frames(sk);
goto out;
}
nskb = skb_peek(&sk->sk_write_queue);
if (nskb) {
if (arg->csumoffset >= 0)
*((__sum16 *)skb_transport_header(nskb) +
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
nskb->mono_delivery_time = !!transmit_time;
if (txhash)
skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
ip_push_pending_frames(sk, &fl4);
}
out:
ip_rt_put(rt);
}
void __init ip_init(void)
{
ip_rt_init();
inet_initpeers();
#if defined(CONFIG_IP_MULTICAST)
igmp_mc_init();
#endif
}
| linux-master | net/ipv4/ip_output.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Robert Olsson <[email protected]> Uppsala Universitet
* & Swedish University of Agricultural Sciences.
*
* Jens Laas <[email protected]> Swedish University of
* Agricultural Sciences.
*
* Hans Liss <[email protected]> Uppsala Universitet
*
* This work is based on the LPC-trie which is originally described in:
*
* An experimental study of compression methods for dynamic tries
* Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
* https://www.csc.kth.se/~snilsson/software/dyntrie2/
*
* IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
* IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
*
* Code from fib_hash has been reused which includes the following header:
*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IPv4 FIB: lookup engine and maintenance routines.
*
* Authors: Alexey Kuznetsov, <[email protected]>
*
* Substantial contributions to this work comes from:
*
* David S. Miller, <[email protected]>
* Stephen Hemminger <[email protected]>
* Paul E. McKenney <[email protected]>
* Patrick McHardy <[email protected]>
*/
#include <linux/cache.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/fib_notifier.h>
#include <trace/events/fib.h>
#include "fib_lookup.h"
static int call_fib_entry_notifier(struct notifier_block *nb,
enum fib_event_type event_type, u32 dst,
int dst_len, struct fib_alias *fa,
struct netlink_ext_ack *extack)
{
struct fib_entry_notifier_info info = {
.info.extack = extack,
.dst = dst,
.dst_len = dst_len,
.fi = fa->fa_info,
.dscp = fa->fa_dscp,
.type = fa->fa_type,
.tb_id = fa->tb_id,
};
return call_fib4_notifier(nb, event_type, &info.info);
}
static int call_fib_entry_notifiers(struct net *net,
enum fib_event_type event_type, u32 dst,
int dst_len, struct fib_alias *fa,
struct netlink_ext_ack *extack)
{
struct fib_entry_notifier_info info = {
.info.extack = extack,
.dst = dst,
.dst_len = dst_len,
.fi = fa->fa_info,
.dscp = fa->fa_dscp,
.type = fa->fa_type,
.tb_id = fa->tb_id,
};
return call_fib4_notifiers(net, event_type, &info.info);
}
#define MAX_STAT_DEPTH 32
#define KEYLENGTH (8*sizeof(t_key))
#define KEY_MAX ((t_key)~0)
typedef unsigned int t_key;
#define IS_TRIE(n) ((n)->pos >= KEYLENGTH)
#define IS_TNODE(n) ((n)->bits)
#define IS_LEAF(n) (!(n)->bits)
struct key_vector {
t_key key;
unsigned char pos; /* 2log(KEYLENGTH) bits needed */
unsigned char bits; /* 2log(KEYLENGTH) bits needed */
unsigned char slen;
union {
		/* This list pointer is valid if (pos | bits) == 0 (LEAF) */
struct hlist_head leaf;
/* This array is valid if (pos | bits) > 0 (TNODE) */
DECLARE_FLEX_ARRAY(struct key_vector __rcu *, tnode);
};
};
struct tnode {
struct rcu_head rcu;
t_key empty_children; /* KEYLENGTH bits needed */
t_key full_children; /* KEYLENGTH bits needed */
struct key_vector __rcu *parent;
struct key_vector kv[1];
#define tn_bits kv[0].bits
};
#define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n])
#define LEAF_SIZE TNODE_SIZE(1)
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
unsigned int gets;
unsigned int backtrack;
unsigned int semantic_match_passed;
unsigned int semantic_match_miss;
unsigned int null_node_hit;
unsigned int resize_node_skipped;
};
#endif
struct trie_stat {
unsigned int totdepth;
unsigned int maxdepth;
unsigned int tnodes;
unsigned int leaves;
unsigned int nullpointers;
unsigned int prefixes;
unsigned int nodesizes[MAX_STAT_DEPTH];
};
struct trie {
struct key_vector kv[1];
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats __percpu *stats;
#endif
};
static struct key_vector *resize(struct trie *t, struct key_vector *tn);
static unsigned int tnode_free_size;
/*
* synchronize_rcu after call_rcu for outstanding dirty memory; it should be
* especially useful before resizing the root node with PREEMPT_NONE configs;
* the value was obtained experimentally, aiming to avoid visible slowdown.
*/
unsigned int sysctl_fib_sync_mem = 512 * 1024;
unsigned int sysctl_fib_sync_mem_min = 64 * 1024;
unsigned int sysctl_fib_sync_mem_max = 64 * 1024 * 1024;
static struct kmem_cache *fn_alias_kmem __ro_after_init;
static struct kmem_cache *trie_leaf_kmem __ro_after_init;
static inline struct tnode *tn_info(struct key_vector *kv)
{
return container_of(kv, struct tnode, kv[0]);
}
/* caller must hold RTNL */
#define node_parent(tn) rtnl_dereference(tn_info(tn)->parent)
#define get_child(tn, i) rtnl_dereference((tn)->tnode[i])
/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent)
#define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i])
/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct key_vector *n, struct key_vector *tp)
{
if (n)
rcu_assign_pointer(tn_info(n)->parent, tp);
}
#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p)
/* This provides us with the number of children in this node; in the case of a
 * leaf this will return 0, meaning none of the children are accessible.
*/
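/* For example, bits == 0 (a leaf) gives (1ul << 0) & ~1ul == 0, while
 * bits == 5 gives (1ul << 5) & ~1ul == 32 child slots.
 */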
static inline unsigned long child_length(const struct key_vector *tn)
{
return (1ul << tn->bits) & ~(1ul);
}
#define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos)
static inline unsigned long get_index(t_key key, struct key_vector *kv)
{
unsigned long index = key ^ kv->key;
if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos))
return 0;
return index >> kv->pos;
}
/* To understand this stuff, an understanding of keys and all their bits is
* necessary. Every node in the trie has a key associated with it, but not
* all of the bits in that key are significant.
*
* Consider a node 'n' and its parent 'tp'.
*
* If n is a leaf, every bit in its key is significant. Its presence is
* necessitated by path compression, since during a tree traversal (when
* searching for a leaf - unless we are doing an insertion) we will completely
* ignore all skipped bits we encounter. Thus we need to verify, at the end of
* a potentially successful search, that we have indeed been walking the
* correct key path.
*
* Note that we can never "miss" the correct key in the tree if present by
* following the wrong path. Path compression ensures that segments of the key
* that are the same for all keys with a given prefix are skipped, but the
* skipped part *is* identical for each node in the subtrie below the skipped
* bit! trie_insert() in this implementation takes care of that.
*
* if n is an internal node - a 'tnode' here, the various parts of its key
* have many different meanings.
*
* Example:
* _________________________________________________________________
* | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
* -----------------------------------------------------------------
* 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
*
* _________________________________________________________________
* | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
* -----------------------------------------------------------------
* 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
*
* tp->pos = 22
* tp->bits = 3
* n->pos = 13
* n->bits = 4
*
* First, let's just ignore the bits that come before the parent tp, that is
* the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
* point we do not use them for anything.
*
* The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
* index into the parent's child array. That is, they will be used to find
* 'n' among tp's children.
*
* The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
* for the node n.
*
* All the bits we have seen so far are significant to the node n. The rest
* of the bits are really not needed or indeed known in n->key.
*
* The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
* n's child array, and will of course be different for each child.
*
* The rest of the bits, from 0 to (n->pos -1) - "u" - are completely unknown
* at this point.
*/
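/* As a concrete example of the index computation used throughout this file:
 * with tp->pos = 22 and tp->bits = 3 as above, and a key whose upper "i"
 * bits match tp->key, get_index(key, tp) == (key ^ tp->key) >> 22 is exactly
 * the 3-bit "N" field, i.e. the slot in tp's child array; if the key differs
 * from tp->key in any higher bit the result is >= (1ul << tp->bits), which
 * the lookup treats as a mismatch.
 */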
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;
static void __alias_free_mem(struct rcu_head *head)
{
struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
kmem_cache_free(fn_alias_kmem, fa);
}
static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
call_rcu(&fa->rcu, __alias_free_mem);
}
#define TNODE_VMALLOC_MAX \
ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))
static void __node_free_rcu(struct rcu_head *head)
{
struct tnode *n = container_of(head, struct tnode, rcu);
if (!n->tn_bits)
kmem_cache_free(trie_leaf_kmem, n);
else
kvfree(n);
}
#define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)
static struct tnode *tnode_alloc(int bits)
{
size_t size;
/* verify bits is within bounds */
if (bits > TNODE_VMALLOC_MAX)
return NULL;
/* determine size and verify it is non-zero and didn't overflow */
size = TNODE_SIZE(1ul << bits);
if (size <= PAGE_SIZE)
return kzalloc(size, GFP_KERNEL);
else
return vzalloc(size);
}
static inline void empty_child_inc(struct key_vector *n)
{
tn_info(n)->empty_children++;
if (!tn_info(n)->empty_children)
tn_info(n)->full_children++;
}
static inline void empty_child_dec(struct key_vector *n)
{
if (!tn_info(n)->empty_children)
tn_info(n)->full_children--;
tn_info(n)->empty_children--;
}
static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
{
struct key_vector *l;
struct tnode *kv;
kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
if (!kv)
return NULL;
/* initialize key vector */
l = kv->kv;
l->key = key;
l->pos = 0;
l->bits = 0;
l->slen = fa->fa_slen;
/* link leaf to fib alias */
INIT_HLIST_HEAD(&l->leaf);
hlist_add_head(&fa->fa_list, &l->leaf);
return l;
}
static struct key_vector *tnode_new(t_key key, int pos, int bits)
{
unsigned int shift = pos + bits;
struct key_vector *tn;
struct tnode *tnode;
	/* verify that bits and pos have their msb bits clear and that the values are valid */
BUG_ON(!bits || (shift > KEYLENGTH));
tnode = tnode_alloc(bits);
if (!tnode)
return NULL;
pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
sizeof(struct key_vector *) << bits);
if (bits == KEYLENGTH)
tnode->full_children = 1;
else
tnode->empty_children = 1ul << bits;
tn = tnode->kv;
tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
tn->pos = pos;
tn->bits = bits;
tn->slen = pos;
return tn;
}
/* Check whether a tnode 'n' is "full", i.e. it is an internal node
* and no bits are skipped. See discussion in dyntree paper p. 6
*/
static inline int tnode_full(struct key_vector *tn, struct key_vector *n)
{
return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}
/* Add a child at position i overwriting the old value.
* Update the value of full_children and empty_children.
*/
static void put_child(struct key_vector *tn, unsigned long i,
struct key_vector *n)
{
struct key_vector *chi = get_child(tn, i);
int isfull, wasfull;
BUG_ON(i >= child_length(tn));
/* update emptyChildren, overflow into fullChildren */
if (!n && chi)
empty_child_inc(tn);
if (n && !chi)
empty_child_dec(tn);
/* update fullChildren */
wasfull = tnode_full(tn, chi);
isfull = tnode_full(tn, n);
if (wasfull && !isfull)
tn_info(tn)->full_children--;
else if (!wasfull && isfull)
tn_info(tn)->full_children++;
if (n && (tn->slen < n->slen))
tn->slen = n->slen;
rcu_assign_pointer(tn->tnode[i], n);
}
static void update_children(struct key_vector *tn)
{
unsigned long i;
/* update all of the child parent pointers */
for (i = child_length(tn); i;) {
struct key_vector *inode = get_child(tn, --i);
if (!inode)
continue;
/* Either update the children of a tnode that
* already belongs to us or update the child
* to point to ourselves.
*/
if (node_parent(inode) == tn)
update_children(inode);
else
node_set_parent(inode, tn);
}
}
static inline void put_child_root(struct key_vector *tp, t_key key,
struct key_vector *n)
{
if (IS_TRIE(tp))
rcu_assign_pointer(tp->tnode[0], n);
else
put_child(tp, get_index(key, tp), n);
}
static inline void tnode_free_init(struct key_vector *tn)
{
tn_info(tn)->rcu.next = NULL;
}
static inline void tnode_free_append(struct key_vector *tn,
struct key_vector *n)
{
tn_info(n)->rcu.next = tn_info(tn)->rcu.next;
tn_info(tn)->rcu.next = &tn_info(n)->rcu;
}
static void tnode_free(struct key_vector *tn)
{
struct callback_head *head = &tn_info(tn)->rcu;
while (head) {
head = head->next;
tnode_free_size += TNODE_SIZE(1ul << tn->bits);
node_free(tn);
tn = container_of(head, struct tnode, rcu)->kv;
}
if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
tnode_free_size = 0;
synchronize_rcu();
}
}
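/* Splice the new tnode 'tn' into the trie in place of 'oldtnode', fix up
 * all parent/child pointers, free the old node(s), and then resize any
 * full children of 'tn'. Returns the parent of 'tn'.
 */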
static struct key_vector *replace(struct trie *t,
struct key_vector *oldtnode,
struct key_vector *tn)
{
struct key_vector *tp = node_parent(oldtnode);
unsigned long i;
/* setup the parent pointer out of and back into this node */
NODE_INIT_PARENT(tn, tp);
put_child_root(tp, tn->key, tn);
/* update all of the child parent pointers */
update_children(tn);
/* all pointers should be clean so we are done */
tnode_free(oldtnode);
/* resize children now that oldtnode is freed */
for (i = child_length(tn); i;) {
struct key_vector *inode = get_child(tn, --i);
/* resize child node */
if (tnode_full(tn, inode))
tn = resize(t, inode);
}
return tp;
}
static struct key_vector *inflate(struct trie *t,
struct key_vector *oldtnode)
{
struct key_vector *tn;
unsigned long i;
t_key m;
pr_debug("In inflate\n");
tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
if (!tn)
goto notnode;
/* prepare oldtnode to be freed */
tnode_free_init(oldtnode);
/* Assemble all of the pointers in our cluster, in this case that
* represents all of the pointers out of our allocated nodes that
* point to existing tnodes and the links between our allocated
* nodes.
*/
for (i = child_length(oldtnode), m = 1u << tn->pos; i;) {
struct key_vector *inode = get_child(oldtnode, --i);
struct key_vector *node0, *node1;
unsigned long j, k;
/* An empty child */
if (!inode)
continue;
/* A leaf or an internal node with skipped bits */
if (!tnode_full(oldtnode, inode)) {
put_child(tn, get_index(inode->key, tn), inode);
continue;
}
/* drop the node in the old tnode free list */
tnode_free_append(oldtnode, inode);
/* An internal node with two children */
if (inode->bits == 1) {
put_child(tn, 2 * i + 1, get_child(inode, 1));
put_child(tn, 2 * i, get_child(inode, 0));
continue;
}
/* We will replace this node 'inode' with two new
* ones, 'node0' and 'node1', each with half of the
* original children. The two new nodes will have
* a position one bit further down the key and this
* means that the "significant" part of their keys
* (see the discussion near the top of this file)
* will differ by one bit, which will be "0" in
* node0's key and "1" in node1's key. Since we are
* moving the key position by one step, the bit that
* we are moving away from - the bit at position
* (tn->pos) - is the one that will differ between
* node0 and node1. So... we synthesize that bit in the
* two new keys.
*/
node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
if (!node1)
goto nomem;
node0 = tnode_new(inode->key, inode->pos, inode->bits - 1);
tnode_free_append(tn, node1);
if (!node0)
goto nomem;
tnode_free_append(tn, node0);
/* populate child pointers in new nodes */
for (k = child_length(inode), j = k / 2; j;) {
put_child(node1, --j, get_child(inode, --k));
put_child(node0, j, get_child(inode, j));
put_child(node1, --j, get_child(inode, --k));
put_child(node0, j, get_child(inode, j));
}
/* link new nodes to parent */
NODE_INIT_PARENT(node1, tn);
NODE_INIT_PARENT(node0, tn);
/* link parent to nodes */
put_child(tn, 2 * i + 1, node1);
put_child(tn, 2 * i, node0);
}
/* setup the parent pointers into and out of this node */
return replace(t, oldtnode, tn);
nomem:
/* all pointers should be clean so we are done */
tnode_free(tn);
notnode:
return NULL;
}
static struct key_vector *halve(struct trie *t,
struct key_vector *oldtnode)
{
struct key_vector *tn;
unsigned long i;
pr_debug("In halve\n");
tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
if (!tn)
goto notnode;
/* prepare oldtnode to be freed */
tnode_free_init(oldtnode);
/* Assemble all of the pointers in our cluster, in this case that
* represents all of the pointers out of our allocated nodes that
* point to existing tnodes and the links between our allocated
* nodes.
*/
for (i = child_length(oldtnode); i;) {
struct key_vector *node1 = get_child(oldtnode, --i);
struct key_vector *node0 = get_child(oldtnode, --i);
struct key_vector *inode;
/* At least one of the children is empty */
if (!node1 || !node0) {
put_child(tn, i / 2, node1 ? : node0);
continue;
}
/* Two nonempty children */
inode = tnode_new(node0->key, oldtnode->pos, 1);
if (!inode)
goto nomem;
tnode_free_append(tn, inode);
/* initialize pointers out of node */
put_child(inode, 1, node1);
put_child(inode, 0, node0);
NODE_INIT_PARENT(inode, tn);
/* link parent to node */
put_child(tn, i / 2, inode);
}
/* setup the parent pointers into and out of this node */
return replace(t, oldtnode, tn);
nomem:
/* all pointers should be clean so we are done */
tnode_free(tn);
notnode:
return NULL;
}
static struct key_vector *collapse(struct trie *t,
struct key_vector *oldtnode)
{
struct key_vector *n, *tp;
unsigned long i;
/* scan the tnode looking for that one child that might still exist */
for (n = NULL, i = child_length(oldtnode); !n && i;)
n = get_child(oldtnode, --i);
/* compress one level */
tp = node_parent(oldtnode);
put_child_root(tp, oldtnode->key, n);
node_set_parent(n, tp);
/* drop dead node */
node_free(oldtnode);
return tp;
}
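/* Recompute tn->slen by scanning the children for the longest suffix still
 * present, and return the new value.
 */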
static unsigned char update_suffix(struct key_vector *tn)
{
unsigned char slen = tn->pos;
unsigned long stride, i;
unsigned char slen_max;
	/* only vector 0 can have a suffix length greater than or equal to
	 * tn->pos + tn->bits; any other vector will have a suffix length of
	 * at most tn->pos + tn->bits - 1
*/
slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen);
	/* search through the list of children looking for nodes that might
* have a suffix greater than the one we currently have. This is
* why we start with a stride of 2 since a stride of 1 would
* represent the nodes with suffix length equal to tn->pos
*/
for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) {
struct key_vector *n = get_child(tn, i);
if (!n || (n->slen <= slen))
continue;
/* update stride and slen based on new value */
stride <<= (n->slen - slen);
slen = n->slen;
i &= ~(stride - 1);
/* stop searching if we have hit the maximum possible value */
if (slen >= slen_max)
break;
}
tn->slen = slen;
return slen;
}
/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
* the Helsinki University of Technology and Matti Tikkanen of Nokia
* Telecommunications, page 6:
* "A node is doubled if the ratio of non-empty children to all
* children in the *doubled* node is at least 'high'."
*
* 'high' in this instance is the variable 'inflate_threshold'. It
* is expressed as a percentage, so we multiply it with
* child_length() and instead of multiplying by 2 (since the
* child array will be doubled by inflate()) and multiplying
* the left-hand side by 100 (to handle the percentage thing) we
* multiply the left-hand side by 50.
*
* The left-hand side may look a bit weird: child_length(tn)
* - tn->empty_children is of course the number of non-null children
* in the current node. tn->full_children is the number of "full"
* children, that is non-null tnodes with a skip value of 0.
* All of those will be doubled in the resulting inflated tnode, so
* we just count them one extra time here.
*
* A clearer way to write this would be:
*
* to_be_doubled = tn->full_children;
* not_to_be_doubled = child_length(tn) - tn->empty_children -
* tn->full_children;
*
* new_child_length = child_length(tn) * 2;
*
* new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
* new_child_length;
* if (new_fill_factor >= inflate_threshold)
*
 * ...and so on, though it would mess up the while () loop.
*
* anyway,
* 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
* inflate_threshold
*
* avoid a division:
* 100 * (not_to_be_doubled + 2*to_be_doubled) >=
* inflate_threshold * new_child_length
*
* expand not_to_be_doubled and to_be_doubled, and shorten:
* 100 * (child_length(tn) - tn->empty_children +
* tn->full_children) >= inflate_threshold * new_child_length
*
* expand new_child_length:
* 100 * (child_length(tn) - tn->empty_children +
* tn->full_children) >=
* inflate_threshold * child_length(tn) * 2
*
* shorten again:
* 50 * (tn->full_children + child_length(tn) -
* tn->empty_children) >= inflate_threshold *
* child_length(tn)
*
*/
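/* For example, a non-root tnode with 16 slots, 4 of them empty and 3 of
 * them full: used = 16 - 4 + 3 = 15 and threshold = 50 * 16 = 800, so
 * 50 * used = 750 < 800 and the node is not inflated.
 */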
static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn)
{
unsigned long used = child_length(tn);
unsigned long threshold = used;
/* Keep root node larger */
threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold;
used -= tn_info(tn)->empty_children;
used += tn_info(tn)->full_children;
/* if bits == KEYLENGTH then pos = 0, and will fail below */
return (used > 1) && tn->pos && ((50 * used) >= threshold);
}
static inline bool should_halve(struct key_vector *tp, struct key_vector *tn)
{
unsigned long used = child_length(tn);
unsigned long threshold = used;
/* Keep root node larger */
threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold;
used -= tn_info(tn)->empty_children;
/* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */
return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
}
static inline bool should_collapse(struct key_vector *tn)
{
unsigned long used = child_length(tn);
used -= tn_info(tn)->empty_children;
/* account for bits == KEYLENGTH case */
if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children)
used -= KEY_MAX;
/* One child or none, time to drop us from the trie */
return used < 2;
}
#define MAX_WORK 10
static struct key_vector *resize(struct trie *t, struct key_vector *tn)
{
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats __percpu *stats = t->stats;
#endif
struct key_vector *tp = node_parent(tn);
unsigned long cindex = get_index(tn->key, tp);
int max_work = MAX_WORK;
pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
tn, inflate_threshold, halve_threshold);
/* track the tnode via the pointer from the parent instead of
* doing it ourselves. This way we can let RCU fully do its
* thing without us interfering
*/
BUG_ON(tn != get_child(tp, cindex));
/* Double as long as the resulting node has a number of
* nonempty nodes that are above the threshold.
*/
while (should_inflate(tp, tn) && max_work) {
tp = inflate(t, tn);
if (!tp) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->resize_node_skipped);
#endif
break;
}
max_work--;
tn = get_child(tp, cindex);
}
/* update parent in case inflate failed */
tp = node_parent(tn);
/* Return if at least one inflate is run */
if (max_work != MAX_WORK)
return tp;
/* Halve as long as the number of empty children in this
* node is above threshold.
*/
while (should_halve(tp, tn) && max_work) {
tp = halve(t, tn);
if (!tp) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->resize_node_skipped);
#endif
break;
}
max_work--;
tn = get_child(tp, cindex);
}
/* Only one child remains */
if (should_collapse(tn))
return collapse(t, tn);
/* update parent in case halve failed */
return node_parent(tn);
}
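/* Walk towards the root shrinking suffix lengths after a suffix of length
 * 'slen' was removed below 'tn'; stops as soon as a node's suffix length
 * turns out to be unaffected.
 */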
static void node_pull_suffix(struct key_vector *tn, unsigned char slen)
{
unsigned char node_slen = tn->slen;
while ((node_slen > tn->pos) && (node_slen > slen)) {
slen = update_suffix(tn);
if (node_slen == slen)
break;
tn = node_parent(tn);
node_slen = tn->slen;
}
}
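/* Propagate a new, longer suffix length up towards the root. */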
static void node_push_suffix(struct key_vector *tn, unsigned char slen)
{
while (tn->slen < slen) {
tn->slen = slen;
tn = node_parent(tn);
}
}
/* rcu_read_lock needs to be held by the caller on the read side */
static struct key_vector *fib_find_node(struct trie *t,
struct key_vector **tp, u32 key)
{
struct key_vector *pn, *n = t->kv;
unsigned long index = 0;
do {
pn = n;
n = get_child_rcu(n, index);
if (!n)
break;
index = get_cindex(key, n);
/* This bit of code is a bit tricky but it combines multiple
* checks into a single check. The prefix consists of the
* prefix plus zeros for the bits in the cindex. The index
* is the difference between the key and this value. From
* this we can actually derive several pieces of data.
* if (index >= (1ul << bits))
* we have a mismatch in skip bits and failed
* else
* we know the value is cindex
*
* This check is safe even if bits == KEYLENGTH due to the
* fact that we can only allocate a node with 32 bits if a
* long is greater than 32 bits.
*/
if (index >= (1ul << n->bits)) {
n = NULL;
break;
}
/* keep searching until we find a perfect match leaf or NULL */
} while (IS_TNODE(n));
*tp = pn;
return n;
}
/* Return the first fib alias matching DSCP with
* priority less than or equal to PRIO.
* If 'find_first' is set, return the first matching
* fib alias, regardless of DSCP and priority.
*/
static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
dscp_t dscp, u32 prio, u32 tb_id,
bool find_first)
{
struct fib_alias *fa;
if (!fah)
return NULL;
hlist_for_each_entry(fa, fah, fa_list) {
/* Avoid Sparse warning when using dscp_t in inequalities */
u8 __fa_dscp = inet_dscp_to_dsfield(fa->fa_dscp);
u8 __dscp = inet_dscp_to_dsfield(dscp);
if (fa->fa_slen < slen)
continue;
if (fa->fa_slen != slen)
break;
if (fa->tb_id > tb_id)
continue;
if (fa->tb_id != tb_id)
break;
if (find_first)
return fa;
if (__fa_dscp > __dscp)
continue;
if (fa->fa_info->fib_priority >= prio || __fa_dscp < __dscp)
return fa;
}
return NULL;
}
static struct fib_alias *
fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
{
u8 slen = KEYLENGTH - fri->dst_len;
struct key_vector *l, *tp;
struct fib_table *tb;
struct fib_alias *fa;
struct trie *t;
tb = fib_get_table(net, fri->tb_id);
if (!tb)
return NULL;
t = (struct trie *)tb->tb_data;
l = fib_find_node(t, &tp, be32_to_cpu(fri->dst));
if (!l)
return NULL;
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
if (fa->fa_slen == slen && fa->tb_id == fri->tb_id &&
fa->fa_dscp == fri->dscp && fa->fa_info == fri->fi &&
fa->fa_type == fri->type)
return fa;
}
return NULL;
}
void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
{
u8 fib_notify_on_flag_change;
struct fib_alias *fa_match;
struct sk_buff *skb;
int err;
rcu_read_lock();
fa_match = fib_find_matching_alias(net, fri);
if (!fa_match)
goto out;
/* These are paired with the WRITE_ONCE() happening in this function.
* The reason is that we are only protected by RCU at this point.
*/
if (READ_ONCE(fa_match->offload) == fri->offload &&
READ_ONCE(fa_match->trap) == fri->trap &&
READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
WRITE_ONCE(fa_match->offload, fri->offload);
WRITE_ONCE(fa_match->trap, fri->trap);
fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change);
/* 2 means send notifications only if offload_failed was changed. */
if (fib_notify_on_flag_change == 2 &&
READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
if (!fib_notify_on_flag_change)
goto out;
skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC);
if (!skb) {
err = -ENOBUFS;
goto errout;
}
err = fib_dump_info(skb, 0, 0, RTM_NEWROUTE, fri, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_IPV4_ROUTE, NULL, GFP_ATOMIC);
goto out;
errout:
rtnl_set_sk_err(net, RTNLGRP_IPV4_ROUTE, err);
out:
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(fib_alias_hw_flags_set);
static void trie_rebalance(struct trie *t, struct key_vector *tn)
{
while (!IS_TRIE(tn))
tn = resize(t, tn);
}
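/* Insert a new leaf carrying 'new' at 'key' below parent 'tp', creating an
 * intermediate tnode first if another node already occupies that slot, then
 * rebalance the trie.
 */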
static int fib_insert_node(struct trie *t, struct key_vector *tp,
struct fib_alias *new, t_key key)
{
struct key_vector *n, *l;
l = leaf_new(key, new);
if (!l)
goto noleaf;
/* retrieve child from parent node */
n = get_child(tp, get_index(key, tp));
/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
*
	 * Add a new tnode here; the first tnode needs some special handling
	 * and leaves us in position for handling as case 3.
*/
if (n) {
struct key_vector *tn;
tn = tnode_new(key, __fls(key ^ n->key), 1);
if (!tn)
goto notnode;
/* initialize routes out of node */
NODE_INIT_PARENT(tn, tp);
put_child(tn, get_index(key, tn) ^ 1, n);
/* start adding routes into the node */
put_child_root(tp, key, tn);
node_set_parent(n, tn);
/* parent now has a NULL spot where the leaf can go */
tp = tn;
}
/* Case 3: n is NULL, and will just insert a new leaf */
node_push_suffix(tp, new->fa_slen);
NODE_INIT_PARENT(l, tp);
put_child_root(tp, key, l);
trie_rebalance(t, tp);
return 0;
notnode:
node_free(l);
noleaf:
return -ENOMEM;
}
static int fib_insert_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *new,
struct fib_alias *fa, t_key key)
{
if (!l)
return fib_insert_node(t, tp, new, key);
if (fa) {
hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
} else {
struct fib_alias *last;
hlist_for_each_entry(last, &l->leaf, fa_list) {
if (new->fa_slen < last->fa_slen)
break;
if ((new->fa_slen == last->fa_slen) &&
(new->tb_id > last->tb_id))
break;
fa = last;
}
if (fa)
hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
else
hlist_add_head_rcu(&new->fa_list, &l->leaf);
}
/* if we added to the tail node then we need to update slen */
if (l->slen < new->fa_slen) {
l->slen = new->fa_slen;
node_push_suffix(tp, new->fa_slen);
}
return 0;
}
static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack)
{
if (plen > KEYLENGTH) {
NL_SET_ERR_MSG(extack, "Invalid prefix length");
return false;
}
if ((plen < KEYLENGTH) && (key << plen)) {
NL_SET_ERR_MSG(extack,
"Invalid prefix for given prefix length");
return false;
}
return true;
}
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *old);
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
{
struct trie *t = (struct trie *)tb->tb_data;
struct fib_alias *fa, *new_fa;
struct key_vector *l, *tp;
u16 nlflags = NLM_F_EXCL;
struct fib_info *fi;
u8 plen = cfg->fc_dst_len;
u8 slen = KEYLENGTH - plen;
dscp_t dscp;
u32 key;
int err;
key = ntohl(cfg->fc_dst);
if (!fib_valid_key_len(key, plen, extack))
return -EINVAL;
pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
fi = fib_create_info(cfg, extack);
if (IS_ERR(fi)) {
err = PTR_ERR(fi);
goto err;
}
dscp = cfg->fc_dscp;
l = fib_find_node(t, &tp, key);
fa = l ? fib_find_alias(&l->leaf, slen, dscp, fi->fib_priority,
tb->tb_id, false) : NULL;
/* Now fa, if non-NULL, points to the first fib alias
* with the same keys [prefix,dscp,priority], if such key already
* exists or to the node before which we will insert new one.
*
* If fa is NULL, we will need to allocate a new one and
* insert to the tail of the section matching the suffix length
* of the new alias.
*/
if (fa && fa->fa_dscp == dscp &&
fa->fa_info->fib_priority == fi->fib_priority) {
struct fib_alias *fa_first, *fa_match;
err = -EEXIST;
if (cfg->fc_nlflags & NLM_F_EXCL)
goto out;
nlflags &= ~NLM_F_EXCL;
/* We have 2 goals:
* 1. Find exact match for type, scope, fib_info to avoid
* duplicate routes
* 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
*/
fa_match = NULL;
fa_first = fa;
hlist_for_each_entry_from(fa, fa_list) {
if ((fa->fa_slen != slen) ||
(fa->tb_id != tb->tb_id) ||
(fa->fa_dscp != dscp))
break;
if (fa->fa_info->fib_priority != fi->fib_priority)
break;
if (fa->fa_type == cfg->fc_type &&
fa->fa_info == fi) {
fa_match = fa;
break;
}
}
if (cfg->fc_nlflags & NLM_F_REPLACE) {
struct fib_info *fi_drop;
u8 state;
nlflags |= NLM_F_REPLACE;
fa = fa_first;
if (fa_match) {
if (fa == fa_match)
err = 0;
goto out;
}
err = -ENOBUFS;
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (!new_fa)
goto out;
fi_drop = fa->fa_info;
new_fa->fa_dscp = fa->fa_dscp;
new_fa->fa_info = fi;
new_fa->fa_type = cfg->fc_type;
state = fa->fa_state;
new_fa->fa_state = state & ~FA_S_ACCESSED;
new_fa->fa_slen = fa->fa_slen;
new_fa->tb_id = tb->tb_id;
new_fa->fa_default = -1;
new_fa->offload = 0;
new_fa->trap = 0;
new_fa->offload_failed = 0;
hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
if (fib_find_alias(&l->leaf, fa->fa_slen, 0, 0,
tb->tb_id, true) == new_fa) {
enum fib_event_type fib_event;
fib_event = FIB_EVENT_ENTRY_REPLACE;
err = call_fib_entry_notifiers(net, fib_event,
key, plen,
new_fa, extack);
if (err) {
hlist_replace_rcu(&new_fa->fa_list,
&fa->fa_list);
goto out_free_new_fa;
}
}
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
tb->tb_id, &cfg->fc_nlinfo, nlflags);
alias_free_mem_rcu(fa);
fib_release_info(fi_drop);
if (state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
goto succeeded;
}
/* Error if we find a perfect match which
* uses the same scope, type, and nexthop
* information.
*/
if (fa_match)
goto out;
if (cfg->fc_nlflags & NLM_F_APPEND)
nlflags |= NLM_F_APPEND;
else
fa = fa_first;
}
err = -ENOENT;
if (!(cfg->fc_nlflags & NLM_F_CREATE))
goto out;
nlflags |= NLM_F_CREATE;
err = -ENOBUFS;
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (!new_fa)
goto out;
new_fa->fa_info = fi;
new_fa->fa_dscp = dscp;
new_fa->fa_type = cfg->fc_type;
new_fa->fa_state = 0;
new_fa->fa_slen = slen;
new_fa->tb_id = tb->tb_id;
new_fa->fa_default = -1;
new_fa->offload = 0;
new_fa->trap = 0;
new_fa->offload_failed = 0;
/* Insert new entry to the list. */
err = fib_insert_alias(t, tp, l, new_fa, fa, key);
if (err)
goto out_free_new_fa;
/* The alias was already inserted, so the node must exist. */
l = l ? l : fib_find_node(t, &tp, key);
if (WARN_ON_ONCE(!l)) {
err = -ENOENT;
goto out_free_new_fa;
}
if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) ==
new_fa) {
enum fib_event_type fib_event;
fib_event = FIB_EVENT_ENTRY_REPLACE;
err = call_fib_entry_notifiers(net, fib_event, key, plen,
new_fa, extack);
if (err)
goto out_remove_new_fa;
}
if (!plen)
tb->tb_num_default++;
rt_cache_flush(cfg->fc_nlinfo.nl_net);
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
return 0;
out_remove_new_fa:
fib_remove_alias(t, tp, l, new_fa);
out_free_new_fa:
kmem_cache_free(fn_alias_kmem, new_fa);
out:
fib_release_info(fi);
err:
return err;
}
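/* Nonzero iff 'key' differs from n->key in some bit at or above the least
 * significant set bit of n->key ((prefix | -prefix) masks that bit and
 * everything above it); always zero when n->key is 0.
 */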
static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
{
t_key prefix = n->key;
return (key ^ prefix) & (prefix | -prefix);
}
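/* A nexthop is usable for this lookup if it is not dead, not being ignored
 * due to link-down state, and matches any requested output interface.
 */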
bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
const struct flowi4 *flp)
{
if (nhc->nhc_flags & RTNH_F_DEAD)
return false;
if (ip_ignore_linkdown(nhc->nhc_dev) &&
nhc->nhc_flags & RTNH_F_LINKDOWN &&
!(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
return false;
if (flp->flowi4_oif && flp->flowi4_oif != nhc->nhc_oif)
return false;
return true;
}
/* should be called with rcu_read_lock */
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags)
{
struct trie *t = (struct trie *) tb->tb_data;
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats __percpu *stats = t->stats;
#endif
const t_key key = ntohl(flp->daddr);
struct key_vector *n, *pn;
struct fib_alias *fa;
unsigned long index;
t_key cindex;
pn = t->kv;
cindex = 0;
n = get_child_rcu(pn, cindex);
if (!n) {
trace_fib_table_lookup(tb->tb_id, flp, NULL, -EAGAIN);
return -EAGAIN;
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->gets);
#endif
/* Step 1: Travel to the longest prefix match in the trie */
for (;;) {
index = get_cindex(key, n);
/* This bit of code is a bit tricky but it combines multiple
* checks into a single check. The prefix consists of the
* prefix plus zeros for the "bits" in the prefix. The index
* is the difference between the key and this value. From
* this we can actually derive several pieces of data.
* if (index >= (1ul << bits))
* we have a mismatch in skip bits and failed
* else
* we know the value is cindex
*
* This check is safe even if bits == KEYLENGTH due to the
* fact that we can only allocate a node with 32 bits if a
* long is greater than 32 bits.
*/
if (index >= (1ul << n->bits))
break;
/* we have found a leaf. Prefixes have already been compared */
if (IS_LEAF(n))
goto found;
/* only record pn and cindex if we are going to be chopping
* bits later. Otherwise we are just wasting cycles.
*/
if (n->slen > n->pos) {
pn = n;
cindex = index;
}
n = get_child_rcu(n, index);
if (unlikely(!n))
goto backtrace;
}
/* Step 2: Sort out leaves and begin backtracing for longest prefix */
for (;;) {
/* record the pointer where our next node pointer is stored */
struct key_vector __rcu **cptr = n->tnode;
/* This test verifies that none of the bits that differ
* between the key and the prefix exist in the region of
* the lsb and higher in the prefix.
*/
if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
goto backtrace;
/* exit out and process leaf */
if (unlikely(IS_LEAF(n)))
break;
/* Don't bother recording parent info. Since we are in
* prefix match mode we will have to come back to wherever
* we started this traversal anyway
*/
while ((n = rcu_dereference(*cptr)) == NULL) {
backtrace:
#ifdef CONFIG_IP_FIB_TRIE_STATS
if (!n)
this_cpu_inc(stats->null_node_hit);
#endif
/* If we are at cindex 0 there are no more bits for
* us to strip at this level so we must ascend back
* up one level to see if there are any more bits to
* be stripped there.
*/
while (!cindex) {
t_key pkey = pn->key;
/* If we don't have a parent then there is
* nothing for us to do as we do not have any
* further nodes to parse.
*/
if (IS_TRIE(pn)) {
trace_fib_table_lookup(tb->tb_id, flp,
NULL, -EAGAIN);
return -EAGAIN;
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->backtrack);
#endif
/* Get Child's index */
pn = node_parent_rcu(pn);
cindex = get_index(pkey, pn);
}
/* strip the least significant bit from the cindex */
cindex &= cindex - 1;
/* grab pointer for next child node */
cptr = &pn->tnode[cindex];
}
}
found:
/* this line carries forward the xor from earlier in the function */
index = key ^ n->key;
/* Step 3: Process the leaf, if that fails fall back to backtracing */
hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
struct fib_nh_common *nhc;
int nhsel, err;
if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
if (index >= (1ul << fa->fa_slen))
continue;
}
if (fa->fa_dscp &&
inet_dscp_to_dsfield(fa->fa_dscp) != flp->flowi4_tos)
continue;
/* Paired with WRITE_ONCE() in fib_release_info() */
if (READ_ONCE(fi->fib_dead))
continue;
if (fa->fa_info->fib_scope < flp->flowi4_scope)
continue;
fib_alias_accessed(fa);
err = fib_props[fa->fa_type].error;
if (unlikely(err < 0)) {
out_reject:
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_passed);
#endif
trace_fib_table_lookup(tb->tb_id, flp, NULL, err);
return err;
}
if (fi->fib_flags & RTNH_F_DEAD)
continue;
if (unlikely(fi->nh)) {
if (nexthop_is_blackhole(fi->nh)) {
err = fib_props[RTN_BLACKHOLE].error;
goto out_reject;
}
nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp,
&nhsel);
if (nhc)
goto set_result;
goto miss;
}
for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
nhc = fib_info_nhc(fi, nhsel);
if (!fib_lookup_good_nhc(nhc, fib_flags, flp))
continue;
set_result:
if (!(fib_flags & FIB_LOOKUP_NOREF))
refcount_inc(&fi->fib_clntref);
res->prefix = htonl(n->key);
res->prefixlen = KEYLENGTH - fa->fa_slen;
res->nh_sel = nhsel;
res->nhc = nhc;
res->type = fa->fa_type;
res->scope = fi->fib_scope;
res->fi = fi;
res->table = tb;
res->fa_head = &n->leaf;
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_passed);
#endif
trace_fib_table_lookup(tb->tb_id, flp, nhc, err);
return err;
}
}
miss:
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_miss);
#endif
goto backtrace;
}
EXPORT_SYMBOL_GPL(fib_table_lookup);
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *old)
{
/* record the location of the previous list_info entry */
struct hlist_node **pprev = old->fa_list.pprev;
struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);
/* remove the fib_alias from the list */
hlist_del_rcu(&old->fa_list);
/* if we emptied the list this leaf will be freed and we can sort
* out parent suffix lengths as a part of trie_rebalance
*/
if (hlist_empty(&l->leaf)) {
if (tp->slen == l->slen)
node_pull_suffix(tp, tp->pos);
put_child_root(tp, l->key, NULL);
node_free(l);
trie_rebalance(t, tp);
return;
}
/* only access fa if it is pointing at the last valid hlist_node */
if (*pprev)
return;
/* update the trie with the latest suffix length */
l->slen = fa->fa_slen;
node_pull_suffix(tp, fa->fa_slen);
}
static void fib_notify_alias_delete(struct net *net, u32 key,
struct hlist_head *fah,
struct fib_alias *fa_to_delete,
struct netlink_ext_ack *extack)
{
struct fib_alias *fa_next, *fa_to_notify;
u32 tb_id = fa_to_delete->tb_id;
u8 slen = fa_to_delete->fa_slen;
enum fib_event_type fib_event;
/* Do not notify if we do not care about the route. */
if (fib_find_alias(fah, slen, 0, 0, tb_id, true) != fa_to_delete)
return;
/* Determine if the route should be replaced by the next route in the
* list.
*/
fa_next = hlist_entry_safe(fa_to_delete->fa_list.next,
struct fib_alias, fa_list);
if (fa_next && fa_next->fa_slen == slen && fa_next->tb_id == tb_id) {
fib_event = FIB_EVENT_ENTRY_REPLACE;
fa_to_notify = fa_next;
} else {
fib_event = FIB_EVENT_ENTRY_DEL;
fa_to_notify = fa_to_delete;
}
call_fib_entry_notifiers(net, fib_event, key, KEYLENGTH - slen,
fa_to_notify, extack);
}
/* Caller must hold RTNL. */
int fib_table_delete(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
{
struct trie *t = (struct trie *) tb->tb_data;
struct fib_alias *fa, *fa_to_delete;
struct key_vector *l, *tp;
u8 plen = cfg->fc_dst_len;
u8 slen = KEYLENGTH - plen;
dscp_t dscp;
u32 key;
key = ntohl(cfg->fc_dst);
if (!fib_valid_key_len(key, plen, extack))
return -EINVAL;
l = fib_find_node(t, &tp, key);
if (!l)
return -ESRCH;
dscp = cfg->fc_dscp;
fa = fib_find_alias(&l->leaf, slen, dscp, 0, tb->tb_id, false);
if (!fa)
return -ESRCH;
pr_debug("Deleting %08x/%d dsfield=0x%02x t=%p\n", key, plen,
inet_dscp_to_dsfield(dscp), t);
fa_to_delete = NULL;
hlist_for_each_entry_from(fa, fa_list) {
struct fib_info *fi = fa->fa_info;
if ((fa->fa_slen != slen) ||
(fa->tb_id != tb->tb_id) ||
(fa->fa_dscp != dscp))
break;
if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
(cfg->fc_scope == RT_SCOPE_NOWHERE ||
fa->fa_info->fib_scope == cfg->fc_scope) &&
(!cfg->fc_prefsrc ||
fi->fib_prefsrc == cfg->fc_prefsrc) &&
(!cfg->fc_protocol ||
fi->fib_protocol == cfg->fc_protocol) &&
fib_nh_match(net, cfg, fi, extack) == 0 &&
fib_metrics_match(cfg, fi)) {
fa_to_delete = fa;
break;
}
}
if (!fa_to_delete)
return -ESRCH;
fib_notify_alias_delete(net, key, &l->leaf, fa_to_delete, extack);
rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
&cfg->fc_nlinfo, 0);
if (!plen)
tb->tb_num_default--;
fib_remove_alias(t, tp, l, fa_to_delete);
if (fa_to_delete->fa_state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
fib_release_info(fa_to_delete->fa_info);
alias_free_mem_rcu(fa_to_delete);
return 0;
}
/* Scan for the next leaf starting at the provided key value */
static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
{
struct key_vector *pn, *n = *tn;
unsigned long cindex;
/* this loop is meant to try and find the key in the trie */
do {
/* record parent and next child index */
pn = n;
cindex = (key > pn->key) ? get_index(key, pn) : 0;
if (cindex >> pn->bits)
break;
/* descend into the next child */
n = get_child_rcu(pn, cindex++);
if (!n)
break;
/* guarantee forward progress on the keys */
if (IS_LEAF(n) && (n->key >= key))
goto found;
} while (IS_TNODE(n));
/* this loop will search for the next leaf with a greater key */
while (!IS_TRIE(pn)) {
/* if we exhausted the parent node we will need to climb */
if (cindex >= (1ul << pn->bits)) {
t_key pkey = pn->key;
pn = node_parent_rcu(pn);
cindex = get_index(pkey, pn) + 1;
continue;
}
/* grab the next available node */
n = get_child_rcu(pn, cindex++);
if (!n)
continue;
/* no need to compare keys since we bumped the index */
if (IS_LEAF(n))
goto found;
		/* Restart scanning in the new node */
pn = n;
cindex = 0;
}
*tn = pn;
return NULL; /* Root of trie */
found:
/* if we are at the limit for keys just return NULL for the tnode */
*tn = pn;
return n;
}
static void fib_trie_free(struct fib_table *tb)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct hlist_node *tmp;
struct fib_alias *fa;
/* walk trie in reverse order and free everything */
for (;;) {
struct key_vector *n;
if (!(cindex--)) {
t_key pkey = pn->key;
if (IS_TRIE(pn))
break;
n = pn;
pn = node_parent(pn);
/* drop emptied tnode */
put_child_root(pn, n->key, NULL);
node_free(n);
cindex = get_index(pkey, pn);
continue;
}
/* grab the next available node */
n = get_child(pn, cindex);
if (!n)
continue;
if (IS_TNODE(n)) {
/* record pn and cindex for leaf walking */
pn = n;
cindex = 1ul << n->bits;
continue;
}
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
hlist_del_rcu(&fa->fa_list);
alias_free_mem_rcu(fa);
}
put_child_root(pn, n->key, NULL);
node_free(n);
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
free_percpu(t->stats);
#endif
kfree(tb);
}
struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
{
struct trie *ot = (struct trie *)oldtb->tb_data;
struct key_vector *l, *tp = ot->kv;
struct fib_table *local_tb;
struct fib_alias *fa;
struct trie *lt;
t_key key = 0;
if (oldtb->tb_data == oldtb->__data)
return oldtb;
local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL);
if (!local_tb)
return NULL;
lt = (struct trie *)local_tb->tb_data;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
struct key_vector *local_l = NULL, *local_tp;
hlist_for_each_entry(fa, &l->leaf, fa_list) {
struct fib_alias *new_fa;
if (local_tb->tb_id != fa->tb_id)
continue;
/* clone fa for new local table */
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (!new_fa)
goto out;
memcpy(new_fa, fa, sizeof(*fa));
/* insert clone into table */
if (!local_l)
local_l = fib_find_node(lt, &local_tp, l->key);
if (fib_insert_alias(lt, local_tp, local_l, new_fa,
NULL, l->key)) {
kmem_cache_free(fn_alias_kmem, new_fa);
goto out;
}
}
/* stop loop if key wrapped back to 0 */
key = l->key + 1;
if (key < l->key)
break;
}
return local_tb;
out:
fib_trie_free(local_tb);
return NULL;
}
/* Caller must hold RTNL */
void fib_table_flush_external(struct fib_table *tb)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct hlist_node *tmp;
struct fib_alias *fa;
/* walk trie in reverse order */
for (;;) {
unsigned char slen = 0;
struct key_vector *n;
if (!(cindex--)) {
t_key pkey = pn->key;
/* cannot resize the trie vector */
if (IS_TRIE(pn))
break;
/* update the suffix to address pulled leaves */
if (pn->slen > pn->pos)
update_suffix(pn);
/* resize completed node */
pn = resize(t, pn);
cindex = get_index(pkey, pn);
continue;
}
/* grab the next available node */
n = get_child(pn, cindex);
if (!n)
continue;
if (IS_TNODE(n)) {
/* record pn and cindex for leaf walking */
pn = n;
cindex = 1ul << n->bits;
continue;
}
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
/* if alias was cloned to local then we just
* need to remove the local copy from main
*/
if (tb->tb_id != fa->tb_id) {
hlist_del_rcu(&fa->fa_list);
alias_free_mem_rcu(fa);
continue;
}
/* record local slen */
slen = fa->fa_slen;
}
/* update leaf slen */
n->slen = slen;
if (hlist_empty(&n->leaf)) {
put_child_root(pn, n->key, NULL);
node_free(n);
}
}
}
/* Caller must hold RTNL. */
int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct hlist_node *tmp;
struct fib_alias *fa;
int found = 0;
/* walk trie in reverse order */
for (;;) {
unsigned char slen = 0;
struct key_vector *n;
if (!(cindex--)) {
t_key pkey = pn->key;
/* cannot resize the trie vector */
if (IS_TRIE(pn))
break;
/* update the suffix to address pulled leaves */
if (pn->slen > pn->pos)
update_suffix(pn);
/* resize completed node */
pn = resize(t, pn);
cindex = get_index(pkey, pn);
continue;
}
/* grab the next available node */
n = get_child(pn, cindex);
if (!n)
continue;
if (IS_TNODE(n)) {
/* record pn and cindex for leaf walking */
pn = n;
cindex = 1ul << n->bits;
continue;
}
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
if (!fi || tb->tb_id != fa->tb_id ||
(!(fi->fib_flags & RTNH_F_DEAD) &&
!fib_props[fa->fa_type].error)) {
slen = fa->fa_slen;
continue;
}
/* Do not flush error routes if network namespace is
* not being dismantled
*/
if (!flush_all && fib_props[fa->fa_type].error) {
slen = fa->fa_slen;
continue;
}
fib_notify_alias_delete(net, n->key, &n->leaf, fa,
NULL);
hlist_del_rcu(&fa->fa_list);
fib_release_info(fa->fa_info);
alias_free_mem_rcu(fa);
found++;
}
/* update leaf slen */
n->slen = slen;
if (hlist_empty(&n->leaf)) {
put_child_root(pn, n->key, NULL);
node_free(n);
}
}
pr_debug("trie_flush found=%d\n", found);
return found;
}
/* derived from fib_trie_free */
static void __fib_info_notify_update(struct net *net, struct fib_table *tb,
struct nl_info *info)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct fib_alias *fa;
for (;;) {
struct key_vector *n;
if (!(cindex--)) {
t_key pkey = pn->key;
if (IS_TRIE(pn))
break;
pn = node_parent(pn);
cindex = get_index(pkey, pn);
continue;
}
/* grab the next available node */
n = get_child(pn, cindex);
if (!n)
continue;
if (IS_TNODE(n)) {
/* record pn and cindex for leaf walking */
pn = n;
cindex = 1ul << n->bits;
continue;
}
hlist_for_each_entry(fa, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
if (!fi || !fi->nh_updated || fa->tb_id != tb->tb_id)
continue;
rtmsg_fib(RTM_NEWROUTE, htonl(n->key), fa,
KEYLENGTH - fa->fa_slen, tb->tb_id,
info, NLM_F_REPLACE);
}
}
}
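/* Re-announce (RTM_NEWROUTE with NLM_F_REPLACE) every route whose fib_info
 * was marked nh_updated, across all tables in the namespace.
 */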
void fib_info_notify_update(struct net *net, struct nl_info *info)
{
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist,
lockdep_rtnl_is_held())
__fib_info_notify_update(net, tb, info);
}
}
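/* Replay FIB_EVENT_ENTRY_REPLACE to the notifier block for the aliases of
 * this leaf, skipping consecutive aliases with the same prefix length and
 * aliases that belong to another table.
 */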
static int fib_leaf_notify(struct key_vector *l, struct fib_table *tb,
struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
struct fib_alias *fa;
int last_slen = -1;
int err;
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
if (!fi)
continue;
/* local and main table can share the same trie,
* so don't notify twice for the same entry.
*/
if (tb->tb_id != fa->tb_id)
continue;
if (fa->fa_slen == last_slen)
continue;
last_slen = fa->fa_slen;
err = call_fib_entry_notifier(nb, FIB_EVENT_ENTRY_REPLACE,
l->key, KEYLENGTH - fa->fa_slen,
fa, extack);
if (err)
return err;
}
return 0;
}
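/* Walk every leaf of the table and replay its entries to the notifier block. */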
static int fib_table_notify(struct fib_table *tb, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *l, *tp = t->kv;
t_key key = 0;
int err;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
err = fib_leaf_notify(l, tb, nb, extack);
if (err)
return err;
key = l->key + 1;
/* stop in case of wrap around */
if (key < l->key)
break;
}
return 0;
}
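/* Replay the whole FIB of a namespace to a notifier block, table by table. */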
int fib_notify(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
unsigned int h;
int err;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
err = fib_table_notify(tb, nb, extack);
if (err)
return err;
}
}
return 0;
}
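/* RCU callback that frees the table; the per-cpu stats are freed only when
 * the table owns its trie rather than aliasing another table's.
 */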
static void __trie_free_rcu(struct rcu_head *head)
{
struct fib_table *tb = container_of(head, struct fib_table, rcu);
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie *t = (struct trie *)tb->tb_data;
if (tb->tb_data == tb->__data)
free_percpu(t->stats);
#endif /* CONFIG_IP_FIB_TRIE_STATS */
kfree(tb);
}
void fib_free_table(struct fib_table *tb)
{
call_rcu(&tb->rcu, __trie_free_rcu);
}
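/* Dump the routes and exceptions of a single leaf into the netlink skb,
 * resuming from cb->args[4]/args[5] and saving the position there when the
 * skb fills up.
 */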
static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
struct sk_buff *skb, struct netlink_callback *cb,
struct fib_dump_filter *filter)
{
unsigned int flags = NLM_F_MULTI;
__be32 xkey = htonl(l->key);
int i, s_i, i_fa, s_fa, err;
struct fib_alias *fa;
if (filter->filter_set ||
!filter->dump_exceptions || !filter->dump_routes)
flags |= NLM_F_DUMP_FILTERED;
s_i = cb->args[4];
s_fa = cb->args[5];
i = 0;
	/* rcu_read_lock is held by caller */
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
if (i < s_i)
goto next;
i_fa = 0;
if (tb->tb_id != fa->tb_id)
goto next;
if (filter->filter_set) {
if (filter->rt_type && fa->fa_type != filter->rt_type)
goto next;
if ((filter->protocol &&
fi->fib_protocol != filter->protocol))
goto next;
if (filter->dev &&
!fib_info_nh_uses_dev(fi, filter->dev))
goto next;
}
if (filter->dump_routes) {
if (!s_fa) {
struct fib_rt_info fri;
fri.fi = fi;
fri.tb_id = tb->tb_id;
fri.dst = xkey;
fri.dst_len = KEYLENGTH - fa->fa_slen;
fri.dscp = fa->fa_dscp;
fri.type = fa->fa_type;
fri.offload = READ_ONCE(fa->offload);
fri.trap = READ_ONCE(fa->trap);
fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWROUTE, &fri, flags);
if (err < 0)
goto stop;
}
i_fa++;
}
if (filter->dump_exceptions) {
err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
&i_fa, s_fa, flags);
if (err < 0)
goto stop;
}
next:
i++;
}
cb->args[4] = i;
return skb->len;
stop:
cb->args[4] = i;
cb->args[5] = i_fa;
return err;
}
/* rcu_read_lock needs to be held by the caller from the read side */
int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
struct netlink_callback *cb, struct fib_dump_filter *filter)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *l, *tp = t->kv;
/* Dump starting at last key.
* Note: 0.0.0.0/0 (ie default) is first key.
*/
int count = cb->args[2];
t_key key = cb->args[3];
/* First time here, count and key are both always 0. Count > 0
* and key == 0 means the dump has wrapped around and we are done.
*/
if (count && !key)
return skb->len;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
int err;
err = fn_trie_dump_leaf(l, tb, skb, cb, filter);
if (err < 0) {
cb->args[3] = key;
cb->args[2] = count;
return err;
}
++count;
key = l->key + 1;
memset(&cb->args[4], 0,
sizeof(cb->args) - 4*sizeof(cb->args[0]));
/* stop loop if key wrapped back to 0 */
if (key < l->key)
break;
}
cb->args[3] = key;
cb->args[2] = count;
return skb->len;
}
void __init fib_trie_init(void)
{
fn_alias_kmem = kmem_cache_create("ip_fib_alias",
sizeof(struct fib_alias),
0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
LEAF_SIZE,
0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
}
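/* Allocate a new table. If @alias is given the new table shares the alias's
 * trie instead of allocating its own; otherwise a fresh trie (and, if
 * enabled, its per-cpu statistics) is initialised.
 */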
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
{
struct fib_table *tb;
struct trie *t;
size_t sz = sizeof(*tb);
if (!alias)
sz += sizeof(struct trie);
tb = kzalloc(sz, GFP_KERNEL);
if (!tb)
return NULL;
tb->tb_id = id;
tb->tb_num_default = 0;
tb->tb_data = (alias ? alias->__data : tb->__data);
if (alias)
return tb;
t = (struct trie *) tb->tb_data;
t->kv[0].pos = KEYLENGTH;
t->kv[0].slen = KEYLENGTH;
#ifdef CONFIG_IP_FIB_TRIE_STATS
t->stats = alloc_percpu(struct trie_use_stats);
if (!t->stats) {
kfree(tb);
tb = NULL;
}
#endif
return tb;
}
#ifdef CONFIG_PROC_FS
/* Depth first Trie walk iterator */
struct fib_trie_iter {
struct seq_net_private p;
struct fib_table *tb;
struct key_vector *tnode;
unsigned int index;
unsigned int depth;
};
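/* Depth-first iterator step: return the next tnode or leaf after the current
 * iterator position, or NULL once the whole trie has been visited.
 */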
static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
{
unsigned long cindex = iter->index;
struct key_vector *pn = iter->tnode;
t_key pkey;
pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
iter->tnode, iter->index, iter->depth);
while (!IS_TRIE(pn)) {
while (cindex < child_length(pn)) {
struct key_vector *n = get_child_rcu(pn, cindex++);
if (!n)
continue;
if (IS_LEAF(n)) {
iter->tnode = pn;
iter->index = cindex;
} else {
/* push down one level */
iter->tnode = n;
iter->index = 0;
++iter->depth;
}
return n;
}
/* Current node exhausted, pop back up */
pkey = pn->key;
pn = node_parent_rcu(pn);
cindex = get_index(pkey, pn) + 1;
--iter->depth;
}
/* record root node so further searches know we are done */
iter->tnode = pn;
iter->index = 0;
return NULL;
}
static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
struct trie *t)
{
struct key_vector *n, *pn;
if (!t)
return NULL;
pn = t->kv;
n = rcu_dereference(pn->tnode[0]);
if (!n)
return NULL;
if (IS_TNODE(n)) {
iter->tnode = n;
iter->index = 0;
iter->depth = 1;
} else {
iter->tnode = pn;
iter->index = 0;
iter->depth = 0;
}
return n;
}
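/* Walk the whole trie and gather the depth, node-size and prefix statistics
 * reported through /proc/net/fib_triestat.
 */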
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
struct key_vector *n;
struct fib_trie_iter iter;
memset(s, 0, sizeof(*s));
rcu_read_lock();
for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
if (IS_LEAF(n)) {
struct fib_alias *fa;
s->leaves++;
s->totdepth += iter.depth;
if (iter.depth > s->maxdepth)
s->maxdepth = iter.depth;
hlist_for_each_entry_rcu(fa, &n->leaf, fa_list)
++s->prefixes;
} else {
s->tnodes++;
if (n->bits < MAX_STAT_DEPTH)
s->nodesizes[n->bits]++;
s->nullpointers += tn_info(n)->empty_children;
}
}
rcu_read_unlock();
}
/*
* This outputs /proc/net/fib_triestats
*/
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
unsigned int i, max, pointers, bytes, avdepth;
if (stat->leaves)
avdepth = stat->totdepth*100 / stat->leaves;
else
avdepth = 0;
seq_printf(seq, "\tAver depth: %u.%02d\n",
avdepth / 100, avdepth % 100);
seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
bytes = LEAF_SIZE * stat->leaves;
seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
bytes += sizeof(struct fib_alias) * stat->prefixes;
seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
bytes += TNODE_SIZE(0) * stat->tnodes;
max = MAX_STAT_DEPTH;
while (max > 0 && stat->nodesizes[max-1] == 0)
max--;
pointers = 0;
for (i = 1; i < max; i++)
if (stat->nodesizes[i] != 0) {
seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
pointers += (1<<i) * stat->nodesizes[i];
}
seq_putc(seq, '\n');
seq_printf(seq, "\tPointers: %u\n", pointers);
bytes += sizeof(struct key_vector *) * pointers;
seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
static void trie_show_usage(struct seq_file *seq,
const struct trie_use_stats __percpu *stats)
{
struct trie_use_stats s = { 0 };
int cpu;
/* loop through all of the CPUs and gather up the stats */
for_each_possible_cpu(cpu) {
const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
s.gets += pcpu->gets;
s.backtrack += pcpu->backtrack;
s.semantic_match_passed += pcpu->semantic_match_passed;
s.semantic_match_miss += pcpu->semantic_match_miss;
s.null_node_hit += pcpu->null_node_hit;
s.resize_node_skipped += pcpu->resize_node_skipped;
}
seq_printf(seq, "\nCounters:\n---------\n");
seq_printf(seq, "gets = %u\n", s.gets);
seq_printf(seq, "backtracks = %u\n", s.backtrack);
seq_printf(seq, "semantic match passed = %u\n",
s.semantic_match_passed);
seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
}
#endif /* CONFIG_IP_FIB_TRIE_STATS */
static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
{
if (tb->tb_id == RT_TABLE_LOCAL)
seq_puts(seq, "Local:\n");
else if (tb->tb_id == RT_TABLE_MAIN)
seq_puts(seq, "Main:\n");
else
seq_printf(seq, "Id %d:\n", tb->tb_id);
}
static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
unsigned int h;
seq_printf(seq,
"Basic info: size of leaf:"
" %zd bytes, size of tnode: %zd bytes.\n",
LEAF_SIZE, TNODE_SIZE(0));
rcu_read_lock();
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
struct trie *t = (struct trie *) tb->tb_data;
struct trie_stat stat;
if (!t)
continue;
fib_table_print(seq, tb);
trie_collect_stats(t, &stat);
trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
trie_show_usage(seq, t->stats);
#endif
}
cond_resched_rcu();
}
rcu_read_unlock();
return 0;
}
static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
loff_t idx = 0;
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
struct key_vector *n;
for (n = fib_trie_get_first(iter,
(struct trie *) tb->tb_data);
n; n = fib_trie_get_next(iter))
if (pos == idx++) {
iter->tb = tb;
return n;
}
}
}
return NULL;
}
static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return fib_trie_get_idx(seq, *pos);
}
static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
struct fib_table *tb = iter->tb;
struct hlist_node *tb_node;
unsigned int h;
struct key_vector *n;
++*pos;
/* next node in same table */
n = fib_trie_get_next(iter);
if (n)
return n;
/* walk rest of this hash chain */
h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
if (n)
goto found;
}
/* new hash chain */
while (++h < FIB_TABLE_HASHSZ) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
if (n)
goto found;
}
}
return NULL;
found:
iter->tb = tb;
return n;
}
static void fib_trie_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static void seq_indent(struct seq_file *seq, int n)
{
while (n-- > 0)
seq_puts(seq, " ");
}
static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
switch (s) {
case RT_SCOPE_UNIVERSE: return "universe";
case RT_SCOPE_SITE: return "site";
case RT_SCOPE_LINK: return "link";
case RT_SCOPE_HOST: return "host";
case RT_SCOPE_NOWHERE: return "nowhere";
default:
snprintf(buf, len, "scope=%d", s);
return buf;
}
}
static const char *const rtn_type_names[__RTN_MAX] = {
[RTN_UNSPEC] = "UNSPEC",
[RTN_UNICAST] = "UNICAST",
[RTN_LOCAL] = "LOCAL",
[RTN_BROADCAST] = "BROADCAST",
[RTN_ANYCAST] = "ANYCAST",
[RTN_MULTICAST] = "MULTICAST",
[RTN_BLACKHOLE] = "BLACKHOLE",
[RTN_UNREACHABLE] = "UNREACHABLE",
[RTN_PROHIBIT] = "PROHIBIT",
[RTN_THROW] = "THROW",
[RTN_NAT] = "NAT",
[RTN_XRESOLVE] = "XRESOLVE",
};
static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
{
if (t < __RTN_MAX && rtn_type_names[t])
return rtn_type_names[t];
snprintf(buf, len, "type %u", t);
return buf;
}
/* Pretty print the trie */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
const struct fib_trie_iter *iter = seq->private;
struct key_vector *n = v;
if (IS_TRIE(node_parent_rcu(n)))
fib_table_print(seq, iter->tb);
if (IS_TNODE(n)) {
__be32 prf = htonl(n->key);
seq_indent(seq, iter->depth-1);
seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
&prf, KEYLENGTH - n->pos - n->bits, n->bits,
tn_info(n)->full_children,
tn_info(n)->empty_children);
} else {
__be32 val = htonl(n->key);
struct fib_alias *fa;
seq_indent(seq, iter->depth);
seq_printf(seq, " |-- %pI4\n", &val);
hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
char buf1[32], buf2[32];
seq_indent(seq, iter->depth + 1);
seq_printf(seq, " /%zu %s %s",
KEYLENGTH - fa->fa_slen,
rtn_scope(buf1, sizeof(buf1),
fa->fa_info->fib_scope),
rtn_type(buf2, sizeof(buf2),
fa->fa_type));
if (fa->fa_dscp)
seq_printf(seq, " tos=%d",
inet_dscp_to_dsfield(fa->fa_dscp));
seq_putc(seq, '\n');
}
}
return 0;
}
static const struct seq_operations fib_trie_seq_ops = {
.start = fib_trie_seq_start,
.next = fib_trie_seq_next,
.stop = fib_trie_seq_stop,
.show = fib_trie_seq_show,
};
struct fib_route_iter {
struct seq_net_private p;
struct fib_table *main_tb;
struct key_vector *tnode;
loff_t pos;
t_key key;
};
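/* Find the leaf at position @pos for the /proc/net/route dump, reusing the
 * key cached from the previous call when the walk can continue forward.
 */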
static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
loff_t pos)
{
struct key_vector *l, **tp = &iter->tnode;
t_key key;
/* use cached location of previously found key */
if (iter->pos > 0 && pos >= iter->pos) {
key = iter->key;
} else {
iter->pos = 1;
key = 0;
}
pos -= iter->pos;
while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) {
key = l->key + 1;
iter->pos++;
l = NULL;
/* handle unlikely case of a key wrap */
if (!key)
break;
}
if (l)
iter->key = l->key; /* remember it */
else
iter->pos = 0; /* forget it */
return l;
}
static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct fib_route_iter *iter = seq->private;
struct fib_table *tb;
struct trie *t;
rcu_read_lock();
tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
if (!tb)
return NULL;
iter->main_tb = tb;
t = (struct trie *)tb->tb_data;
iter->tnode = t->kv;
if (*pos != 0)
return fib_route_get_idx(iter, *pos);
iter->pos = 0;
iter->key = KEY_MAX;
return SEQ_START_TOKEN;
}
static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct fib_route_iter *iter = seq->private;
struct key_vector *l = NULL;
t_key key = iter->key + 1;
++*pos;
/* only allow key of 0 for start of sequence */
if ((v == SEQ_START_TOKEN) || key)
l = leaf_walk_rcu(&iter->tnode, key);
if (l) {
iter->key = l->key;
iter->pos++;
} else {
iter->pos = 0;
}
return l;
}
static void fib_route_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
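/* Translate route type, netmask and nexthop into the legacy RTF_* flags
 * expected in /proc/net/route.
 */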
static unsigned int fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
{
unsigned int flags = 0;
if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
flags = RTF_REJECT;
if (fi) {
const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
if (nhc->nhc_gw.ipv4)
flags |= RTF_GATEWAY;
}
if (mask == htonl(0xFFFFFFFF))
flags |= RTF_HOST;
flags |= RTF_UP;
return flags;
}
/*
* This outputs /proc/net/route.
 * The format of the file is not supposed to be changed
 * and needs to be the same as the fib_hash output to avoid
 * breaking legacy utilities.
*/
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
struct fib_route_iter *iter = seq->private;
struct fib_table *tb = iter->main_tb;
struct fib_alias *fa;
struct key_vector *l = v;
__be32 prefix;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
"\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
"\tWindow\tIRTT");
return 0;
}
prefix = htonl(l->key);
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
__be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen);
unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
if ((fa->fa_type == RTN_BROADCAST) ||
(fa->fa_type == RTN_MULTICAST))
continue;
if (fa->tb_id != tb->tb_id)
continue;
seq_setwidth(seq, 127);
if (fi) {
struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
__be32 gw = 0;
if (nhc->nhc_gw_family == AF_INET)
gw = nhc->nhc_gw.ipv4;
seq_printf(seq,
"%s\t%08X\t%08X\t%04X\t%d\t%u\t"
"%d\t%08X\t%d\t%u\t%u",
nhc->nhc_dev ? nhc->nhc_dev->name : "*",
prefix, gw, flags, 0, 0,
fi->fib_priority,
mask,
(fi->fib_advmss ?
fi->fib_advmss + 40 : 0),
fi->fib_window,
fi->fib_rtt >> 3);
} else {
seq_printf(seq,
"*\t%08X\t%08X\t%04X\t%d\t%u\t"
"%d\t%08X\t%d\t%u\t%u",
prefix, 0, flags, 0, 0, 0,
mask, 0, 0, 0);
}
seq_pad(seq, '\n');
}
return 0;
}
static const struct seq_operations fib_route_seq_ops = {
.start = fib_route_seq_start,
.next = fib_route_seq_next,
.stop = fib_route_seq_stop,
.show = fib_route_seq_show,
};
int __net_init fib_proc_init(struct net *net)
{
if (!proc_create_net("fib_trie", 0444, net->proc_net, &fib_trie_seq_ops,
sizeof(struct fib_trie_iter)))
goto out1;
if (!proc_create_net_single("fib_triestat", 0444, net->proc_net,
fib_triestat_seq_show, NULL))
goto out2;
if (!proc_create_net("route", 0444, net->proc_net, &fib_route_seq_ops,
sizeof(struct fib_route_iter)))
goto out3;
return 0;
out3:
remove_proc_entry("fib_triestat", net->proc_net);
out2:
remove_proc_entry("fib_trie", net->proc_net);
out1:
return -ENOMEM;
}
void __net_exit fib_proc_exit(struct net *net)
{
remove_proc_entry("fib_trie", net->proc_net);
remove_proc_entry("fib_triestat", net->proc_net);
remove_proc_entry("route", net->proc_net);
}
#endif /* CONFIG_PROC_FS */
| linux-master | net/ipv4/fib_trie.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Packet matching code.
*
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2005 Netfilter Core Team <[email protected]>
* Copyright (C) 2006-2010 Patrick McHardy <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/compat.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("IPv4 packet filter");
void *ipt_alloc_initial_table(const struct xt_table *info)
{
return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
const char *indev,
const char *outdev,
const struct ipt_ip *ipinfo,
int isfrag)
{
unsigned long ret;
if (NF_INVF(ipinfo, IPT_INV_SRCIP,
(ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
NF_INVF(ipinfo, IPT_INV_DSTIP,
(ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
return false;
ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
return false;
ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
return false;
/* Check specific protocol */
if (ipinfo->proto &&
NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
return false;
	/* If we have a fragment rule but the packet is not a fragment
	 * then we return false */
if (NF_INVF(ipinfo, IPT_INV_FRAG,
(ipinfo->flags & IPT_F_FRAG) && !isfrag))
return false;
return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
if (ip->flags & ~IPT_F_MASK)
return false;
if (ip->invflags & ~IPT_INV_MASK)
return false;
return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_entry *e)
{
static const struct ipt_ip uncond;
return e->target_offset == sizeof(struct ipt_entry) &&
memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
return ipt_get_target((struct ipt_entry *)e);
}
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
[NF_INET_PRE_ROUTING] = "PREROUTING",
[NF_INET_LOCAL_IN] = "INPUT",
[NF_INET_FORWARD] = "FORWARD",
[NF_INET_LOCAL_OUT] = "OUTPUT",
[NF_INET_POST_ROUTING] = "POSTROUTING",
};
enum nf_ip_trace_comments {
NF_IP_TRACE_COMMENT_RULE,
NF_IP_TRACE_COMMENT_RETURN,
NF_IP_TRACE_COMMENT_POLICY,
};
static const char *const comments[] = {
[NF_IP_TRACE_COMMENT_RULE] = "rule",
[NF_IP_TRACE_COMMENT_RETURN] = "return",
[NF_IP_TRACE_COMMENT_POLICY] = "policy",
};
static const struct nf_loginfo trace_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 4,
.logflags = NF_LOG_DEFAULT_MASK,
},
},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
const char *hookname, const char **chainname,
const char **comment, unsigned int *rulenum)
{
const struct xt_standard_target *t = (void *)ipt_get_target_c(s);
if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
/* Head of user chain: ERROR target with chainname */
*chainname = t->target.data;
(*rulenum) = 0;
} else if (s == e) {
(*rulenum)++;
if (unconditional(s) &&
strcmp(t->target.u.kernel.target->name,
XT_STANDARD_TARGET) == 0 &&
t->verdict < 0) {
/* Tail of chains: STANDARD target (return/policy) */
*comment = *chainname == hookname
? comments[NF_IP_TRACE_COMMENT_POLICY]
: comments[NF_IP_TRACE_COMMENT_RETURN];
}
return 1;
} else
(*rulenum)++;
return 0;
}
static void trace_packet(struct net *net,
const struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
const char *tablename,
const struct xt_table_info *private,
const struct ipt_entry *e)
{
const struct ipt_entry *root;
const char *hookname, *chainname, *comment;
const struct ipt_entry *iter;
unsigned int rulenum = 0;
root = get_entry(private->entries, private->hook_entry[hook]);
hookname = chainname = hooknames[hook];
comment = comments[NF_IP_TRACE_COMMENT_RULE];
xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
if (get_chainname_rulenum(iter, e, hookname,
&chainname, &comment, &rulenum) != 0)
break;
nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
"TRACE: %s:%s:%s:%u ",
tablename, chainname, comment, rulenum);
}
#endif
static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
return (void *)entry + entry->next_offset;
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
const struct xt_table *table = priv;
unsigned int hook = state->hook;
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
const struct iphdr *ip;
/* Initializing verdict to NF_DROP keeps gcc happy. */
unsigned int verdict = NF_DROP;
const char *indev, *outdev;
const void *table_base;
struct ipt_entry *e, **jumpstack;
unsigned int stackidx, cpu;
const struct xt_table_info *private;
struct xt_action_param acpar;
unsigned int addend;
/* Initialization */
stackidx = 0;
ip = ip_hdr(skb);
indev = state->in ? state->in->name : nulldevname;
outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * about things we don't know (ie. tcp syn flag or ports). If
	 * the rule is also a fragment-specific rule, non-fragments
	 * won't match it. */
acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
acpar.thoff = ip_hdrlen(skb);
acpar.hotdrop = false;
acpar.state = state;
WARN_ON(!(table->valid_hooks & (1 << hook)));
local_bh_disable();
addend = xt_write_recseq_begin();
private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
/* Switch to alternate jumpstack if we're being invoked via TEE.
* TEE issues XT_CONTINUE verdict on original skb so we must not
* clobber the jumpstack.
*
* For recursion via REJECT or SYNPROXY the stack will be clobbered
* but it is no problem since absolute verdict is issued by these.
*/
if (static_key_false(&xt_tee_enabled))
jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
e = get_entry(table_base, private->hook_entry[hook]);
do {
const struct xt_entry_target *t;
const struct xt_entry_match *ematch;
struct xt_counters *counter;
WARN_ON(!e);
if (!ip_packet_match(ip, indev, outdev,
&e->ip, acpar.fragoff)) {
no_match:
e = ipt_next_entry(e);
continue;
}
xt_ematch_foreach(ematch, e) {
acpar.match = ematch->u.kernel.match;
acpar.matchinfo = ematch->data;
if (!acpar.match->match(skb, &acpar))
goto no_match;
}
counter = xt_get_this_cpu_counter(&e->counters);
ADD_COUNTER(*counter, skb->len, 1);
t = ipt_get_target_c(e);
WARN_ON(!t->u.kernel.target);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* The packet is traced: log it */
if (unlikely(skb->nf_trace))
trace_packet(state->net, skb, hook, state->in,
state->out, table->name, private, e);
#endif
/* Standard target? */
if (!t->u.kernel.target->target) {
int v;
v = ((struct xt_standard_target *)t)->verdict;
if (v < 0) {
/* Pop from stack? */
if (v != XT_RETURN) {
verdict = (unsigned int)(-v) - 1;
break;
}
if (stackidx == 0) {
e = get_entry(table_base,
private->underflow[hook]);
} else {
e = jumpstack[--stackidx];
e = ipt_next_entry(e);
}
continue;
}
if (table_base + v != ipt_next_entry(e) &&
!(e->ip.flags & IPT_F_GOTO)) {
if (unlikely(stackidx >= private->stacksize)) {
verdict = NF_DROP;
break;
}
jumpstack[stackidx++] = e;
}
e = get_entry(table_base, v);
continue;
}
acpar.target = t->u.kernel.target;
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
if (verdict == XT_CONTINUE) {
/* Target might have changed stuff. */
ip = ip_hdr(skb);
e = ipt_next_entry(e);
} else {
/* Verdict */
break;
}
} while (!acpar.hotdrop);
xt_write_recseq_end(addend);
local_bh_enable();
if (acpar.hotdrop)
return NF_DROP;
	else
		return verdict;
}
/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0,
unsigned int *offsets)
{
unsigned int hook;
/* No recursion; use packet counter to save back ptrs (reset
to 0 as we leave), and comefrom to save source hook bitmask */
for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
unsigned int pos = newinfo->hook_entry[hook];
struct ipt_entry *e = entry0 + pos;
if (!(valid_hooks & (1 << hook)))
continue;
/* Set initial back pointer. */
e->counters.pcnt = pos;
for (;;) {
const struct xt_standard_target *t
= (void *)ipt_get_target_c(e);
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_INET_NUMHOOKS))
return 0;
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
/* Unconditional return/END. */
if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < 0) || visited) {
unsigned int oldpos, size;
/* Return: backtrack through the last
big jump. */
do {
e->comefrom ^= (1<<NF_INET_NUMHOOKS);
oldpos = pos;
pos = e->counters.pcnt;
e->counters.pcnt = 0;
/* We're at the start. */
if (pos == oldpos)
goto next;
e = entry0 + pos;
} while (oldpos == pos + e->next_offset);
/* Move along one */
size = e->next_offset;
e = entry0 + pos + size;
if (pos + size >= newinfo->size)
return 0;
e->counters.pcnt = pos;
pos += size;
} else {
int newpos = t->verdict;
if (strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0 &&
newpos >= 0) {
					/* This is a jump; chase it. */
if (!xt_find_jump_offset(offsets, newpos,
newinfo->number))
return 0;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
if (newpos >= newinfo->size)
return 0;
}
e = entry0 + newpos;
e->counters.pcnt = pos;
pos = newpos;
}
}
next: ;
}
return 1;
}
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
struct xt_mtdtor_param par;
par.net = net;
par.match = m->u.kernel.match;
par.matchinfo = m->data;
par.family = NFPROTO_IPV4;
if (par.match->destroy != NULL)
par.match->destroy(&par);
module_put(par.match->me);
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
const struct ipt_ip *ip = par->entryinfo;
par->match = m->u.kernel.match;
par->matchinfo = m->data;
return xt_check_match(par, m->u.match_size - sizeof(*m),
ip->proto, ip->invflags & IPT_INV_PROTO);
}
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
struct xt_match *match;
int ret;
match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
m->u.user.revision);
if (IS_ERR(match))
return PTR_ERR(match);
m->u.kernel.match = match;
ret = check_match(m, par);
if (ret)
goto err;
return 0;
err:
module_put(m->u.kernel.match->me);
return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
struct xt_entry_target *t = ipt_get_target(e);
struct xt_tgchk_param par = {
.net = net,
.table = name,
.entryinfo = e,
.target = t->u.kernel.target,
.targinfo = t->data,
.hook_mask = e->comefrom,
.family = NFPROTO_IPV4,
};
return xt_check_target(&par, t->u.target_size - sizeof(*t),
e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
unsigned int size,
struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
struct xt_target *target;
int ret;
unsigned int j;
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
return -ENOMEM;
j = 0;
memset(&mtpar, 0, sizeof(mtpar));
mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ip;
mtpar.hook_mask = e->comefrom;
mtpar.family = NFPROTO_IPV4;
xt_ematch_foreach(ematch, e) {
ret = find_check_match(ematch, &mtpar);
if (ret != 0)
goto cleanup_matches;
++j;
}
t = ipt_get_target(e);
target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
ret = PTR_ERR(target);
goto cleanup_matches;
}
t->u.kernel.target = target;
ret = check_target(e, net, name);
if (ret)
goto err;
return 0;
err:
module_put(t->u.kernel.target->me);
cleanup_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
cleanup_match(ematch, net);
}
xt_percpu_counter_free(&e->counters);
return ret;
}
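/* An underflow (base chain policy) entry must be an unconditional standard
 * target whose verdict is a plain NF_DROP or NF_ACCEPT.
 */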
static bool check_underflow(const struct ipt_entry *e)
{
const struct xt_entry_target *t;
unsigned int verdict;
if (!unconditional(e))
return false;
t = ipt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
return false;
verdict = ((struct xt_standard_target *)t)->verdict;
verdict = -verdict - 1;
return verdict == NF_DROP || verdict == NF_ACCEPT;
}
static int
check_entry_size_and_hooks(struct ipt_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
(unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit)
return -EINVAL;
if (e->next_offset
< sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
return -EINVAL;
if (!ip_checkentry(&e->ip))
return -EINVAL;
err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e->next_offset);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e))
return -EINVAL;
newinfo->underflow[h] = underflows[h];
}
}
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
struct xt_tgdtor_param par;
struct xt_entry_target *t;
struct xt_entry_match *ematch;
/* Cleanup all matches */
xt_ematch_foreach(ematch, e)
cleanup_match(ematch, net);
t = ipt_get_target(e);
par.net = net;
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_IPV4;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
const struct ipt_replace *repl)
{
struct xt_percpu_counter_alloc_state alloc_state = { 0 };
struct ipt_entry *iter;
unsigned int *offsets;
unsigned int i;
int ret = 0;
newinfo->size = repl->size;
newinfo->number = repl->num_entries;
/* Init all hooks to impossible value. */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
newinfo->hook_entry[i] = 0xFFFFFFFF;
newinfo->underflow[i] = 0xFFFFFFFF;
}
offsets = xt_alloc_entry_offsets(newinfo->number);
if (!offsets)
return -ENOMEM;
i = 0;
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter, entry0, newinfo->size) {
ret = check_entry_size_and_hooks(iter, newinfo, entry0,
entry0 + repl->size,
repl->hook_entry,
repl->underflow,
repl->valid_hooks);
if (ret != 0)
goto out_free;
if (i < repl->num_entries)
offsets[i] = (void *)iter - entry0;
++i;
if (strcmp(ipt_get_target(iter)->u.user.name,
XT_ERROR_TARGET) == 0)
++newinfo->stacksize;
}
ret = -EINVAL;
if (i != repl->num_entries)
goto out_free;
ret = xt_check_table_hooks(newinfo, repl->valid_hooks);
if (ret)
goto out_free;
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
ret = -ELOOP;
goto out_free;
}
kvfree(offsets);
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
ret = find_check_entry(iter, net, repl->name, repl->size,
&alloc_state);
if (ret != 0)
break;
++i;
}
if (ret != 0) {
xt_entry_foreach(iter, entry0, newinfo->size) {
if (i-- == 0)
break;
cleanup_entry(iter, net);
}
return ret;
}
return ret;
out_free:
kvfree(offsets);
return ret;
}
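/* Sum the per-cpu counters of every rule into @counters, using the xt_recseq
 * sequence counter to read a consistent byte/packet pair from each cpu.
 */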
static void
get_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
struct ipt_entry *iter;
unsigned int cpu;
unsigned int i;
for_each_possible_cpu(cpu) {
seqcount_t *s = &per_cpu(xt_recseq, cpu);
i = 0;
xt_entry_foreach(iter, t->entries, t->size) {
struct xt_counters *tmp;
u64 bcnt, pcnt;
unsigned int start;
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
do {
start = read_seqcount_begin(s);
bcnt = tmp->bcnt;
pcnt = tmp->pcnt;
} while (read_seqcount_retry(s, start));
ADD_COUNTER(counters[i], bcnt, pcnt);
++i; /* macro does multi eval of i */
cond_resched();
}
}
}
static void get_old_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
struct ipt_entry *iter;
unsigned int cpu, i;
for_each_possible_cpu(cpu) {
i = 0;
xt_entry_foreach(iter, t->entries, t->size) {
const struct xt_counters *tmp;
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
++i; /* macro does multi eval of i */
}
cond_resched();
}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
	/* We need an atomic snapshot of the counters: the rest doesn't
	   change (other than comefrom, which userspace doesn't care
	   about). */
countersize = sizeof(struct xt_counters) * private->number;
counters = vzalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
get_counters(private, counters);
return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
const struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;
const struct ipt_entry *e;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
int ret = 0;
const void *loc_cpu_entry;
counters = alloc_counters(table);
if (IS_ERR(counters))
return PTR_ERR(counters);
loc_cpu_entry = private->entries;
/* FIXME: use iterator macros --RR */
/* ... then go back and fix counters and names */
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
unsigned int i;
const struct xt_entry_match *m;
const struct xt_entry_target *t;
e = loc_cpu_entry + off;
if (copy_to_user(userptr + off, e, sizeof(*e))) {
ret = -EFAULT;
goto free_counters;
}
if (copy_to_user(userptr + off
+ offsetof(struct ipt_entry, counters),
&counters[num],
sizeof(counters[num])) != 0) {
ret = -EFAULT;
goto free_counters;
}
for (i = sizeof(struct ipt_entry);
i < e->target_offset;
i += m->u.match_size) {
m = (void *)e + i;
if (xt_match_to_user(m, userptr + off + i)) {
ret = -EFAULT;
goto free_counters;
}
}
t = ipt_get_target_c(e);
if (xt_target_to_user(t, userptr + off + e->target_offset)) {
ret = -EFAULT;
goto free_counters;
}
}
free_counters:
vfree(counters);
return ret;
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;
if (v > 0)
v += xt_compat_calc_jump(AF_INET, v);
memcpy(dst, &v, sizeof(v));
}
static int compat_standard_to_user(void __user *dst, const void *src)
{
compat_int_t cv = *(int *)src;
if (cv > 0)
cv -= xt_compat_calc_jump(AF_INET, cv);
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
static int compat_calc_entry(const struct ipt_entry *e,
const struct xt_table_info *info,
const void *base, struct xt_table_info *newinfo)
{
const struct xt_entry_match *ematch;
const struct xt_entry_target *t;
unsigned int entry_offset;
int off, i, ret;
off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
entry_offset = (void *)e - base;
xt_ematch_foreach(ematch, e)
off += xt_compat_match_offset(ematch->u.kernel.match);
t = ipt_get_target_c(e);
off += xt_compat_target_offset(t->u.kernel.target);
newinfo->size -= off;
ret = xt_compat_add_offset(AF_INET, entry_offset, off);
if (ret)
return ret;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
if (info->hook_entry[i] &&
(e < (struct ipt_entry *)(base + info->hook_entry[i])))
newinfo->hook_entry[i] -= off;
if (info->underflow[i] &&
(e < (struct ipt_entry *)(base + info->underflow[i])))
newinfo->underflow[i] -= off;
}
return 0;
}
static int compat_table_info(const struct xt_table_info *info,
struct xt_table_info *newinfo)
{
struct ipt_entry *iter;
const void *loc_cpu_entry;
int ret;
if (!newinfo || !info)
return -EINVAL;
	/* we don't care about newinfo->entries */
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries;
ret = xt_compat_init_offsets(AF_INET, info->number);
if (ret)
return ret;
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
return ret;
}
return 0;
}
#endif
static int get_info(struct net *net, void __user *user, const int *len)
{
char name[XT_TABLE_MAXNAMELEN];
struct xt_table *t;
int ret;
if (*len != sizeof(struct ipt_getinfo))
return -EINVAL;
if (copy_from_user(name, user, sizeof(name)) != 0)
return -EFAULT;
name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
xt_compat_lock(AF_INET);
#endif
t = xt_request_find_table_lock(net, AF_INET, name);
if (!IS_ERR(t)) {
struct ipt_getinfo info;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct xt_table_info tmp;
if (in_compat_syscall()) {
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(AF_INET);
private = &tmp;
}
#endif
memset(&info, 0, sizeof(info));
info.valid_hooks = t->valid_hooks;
memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
memcpy(info.underflow, private->underflow,
sizeof(info.underflow));
info.num_entries = private->number;
info.size = private->size;
strcpy(info.name, name);
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
else
ret = 0;
xt_table_unlock(t);
module_put(t->me);
} else
ret = PTR_ERR(t);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
xt_compat_unlock(AF_INET);
#endif
return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
const int *len)
{
int ret;
struct ipt_get_entries get;
struct xt_table *t;
if (*len < sizeof(get))
return -EINVAL;
if (copy_from_user(&get, uptr, sizeof(get)) != 0)
return -EFAULT;
if (*len != sizeof(struct ipt_get_entries) + get.size)
return -EINVAL;
get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
else
ret = -EAGAIN;
module_put(t->me);
xt_table_unlock(t);
} else
ret = PTR_ERR(t);
return ret;
}
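/* Swap the new ruleset into the table, collect the counters of the old one,
 * release the old entries and copy those counters back to userspace.
 */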
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct xt_table_info *newinfo, unsigned int num_counters,
void __user *counters_ptr)
{
int ret;
struct xt_table *t;
struct xt_table_info *oldinfo;
struct xt_counters *counters;
struct ipt_entry *iter;
counters = xt_counters_alloc(num_counters);
if (!counters) {
ret = -ENOMEM;
goto out;
}
t = xt_request_find_table_lock(net, AF_INET, name);
if (IS_ERR(t)) {
ret = PTR_ERR(t);
goto free_newinfo_counters_untrans;
}
/* You lied! */
if (valid_hooks != t->valid_hooks) {
ret = -EINVAL;
goto put_module;
}
oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
if (!oldinfo)
goto put_module;
/* Update module usage count based on number of rules */
if ((oldinfo->number > oldinfo->initial_entries) ||
(newinfo->number <= oldinfo->initial_entries))
module_put(t->me);
if ((oldinfo->number > oldinfo->initial_entries) &&
(newinfo->number <= oldinfo->initial_entries))
module_put(t->me);
xt_table_unlock(t);
get_old_counters(oldinfo, counters);
/* Decrease module usage counts and free resource */
xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
cleanup_entry(iter, net);
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
sizeof(struct xt_counters) * num_counters) != 0) {
/* Silent error, can't fail, new table is already in place */
net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
}
vfree(counters);
return 0;
put_module:
module_put(t->me);
xt_table_unlock(t);
free_newinfo_counters_untrans:
vfree(counters);
out:
return ret;
}
static int
do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
int ret;
struct ipt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
struct ipt_entry *iter;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
loc_cpu_entry = newinfo->entries;
if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
tmp.size) != 0) {
ret = -EFAULT;
goto free_newinfo;
}
ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, tmp.counters);
if (ret)
goto free_newinfo_untrans;
return 0;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
static int
do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
{
unsigned int i;
struct xt_counters_info tmp;
struct xt_counters *paddc;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
struct ipt_entry *iter;
unsigned int addend;
paddc = xt_copy_counters(arg, len, &tmp);
if (IS_ERR(paddc))
return PTR_ERR(paddc);
t = xt_find_table_lock(net, AF_INET, tmp.name);
if (IS_ERR(t)) {
ret = PTR_ERR(t);
goto free;
}
local_bh_disable();
private = t->private;
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
i = 0;
addend = xt_write_recseq_begin();
xt_entry_foreach(iter, private->entries, private->size) {
struct xt_counters *tmp;
tmp = xt_get_this_cpu_counter(&iter->counters);
ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
++i;
}
xt_write_recseq_end(addend);
unlock_up_free:
local_bh_enable();
xt_table_unlock(t);
module_put(t->me);
free:
vfree(paddc);
return ret;
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct compat_ipt_replace {
char name[XT_TABLE_MAXNAMELEN];
u32 valid_hooks;
u32 num_entries;
u32 size;
u32 hook_entry[NF_INET_NUMHOOKS];
u32 underflow[NF_INET_NUMHOOKS];
u32 num_counters;
compat_uptr_t counters; /* struct xt_counters * */
struct compat_ipt_entry entries[];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
unsigned int *size, struct xt_counters *counters,
unsigned int i)
{
struct xt_entry_target *t;
struct compat_ipt_entry __user *ce;
u_int16_t target_offset, next_offset;
compat_uint_t origsize;
const struct xt_entry_match *ematch;
int ret = 0;
origsize = *size;
ce = *dstptr;
if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
copy_to_user(&ce->counters, &counters[i],
sizeof(counters[i])) != 0)
return -EFAULT;
*dstptr += sizeof(struct compat_ipt_entry);
*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
xt_ematch_foreach(ematch, e) {
ret = xt_compat_match_to_user(ematch, dstptr, size);
if (ret != 0)
return ret;
}
target_offset = e->target_offset - (origsize - *size);
t = ipt_get_target(e);
ret = xt_compat_target_to_user(t, dstptr, size);
if (ret)
return ret;
next_offset = e->next_offset - (origsize - *size);
if (put_user(target_offset, &ce->target_offset) != 0 ||
put_user(next_offset, &ce->next_offset) != 0)
return -EFAULT;
return 0;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
const struct ipt_ip *ip,
int *size)
{
struct xt_match *match;
match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
m->u.user.revision);
if (IS_ERR(match))
return PTR_ERR(match);
m->u.kernel.match = match;
*size += xt_compat_match_offset(match);
return 0;
}
static void compat_release_entry(struct compat_ipt_entry *e)
{
struct xt_entry_target *t;
struct xt_entry_match *ematch;
/* Cleanup all matches */
xt_ematch_foreach(ematch, e)
module_put(ematch->u.kernel.match->me);
t = compat_ipt_get_target(e);
module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
int ret, off;
if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit)
return -EINVAL;
if (e->next_offset < sizeof(struct compat_ipt_entry) +
sizeof(struct compat_xt_entry_target))
return -EINVAL;
if (!ip_checkentry(&e->ip))
return -EINVAL;
ret = xt_compat_check_entry_offsets(e, e->elems,
e->target_offset, e->next_offset);
if (ret)
return ret;
off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, &e->ip, &off);
if (ret != 0)
goto release_matches;
++j;
}
t = compat_ipt_get_target(e);
target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(AF_INET, entry_offset, off);
if (ret)
goto out;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
module_put(ematch->u.kernel.match->me);
}
return ret;
}
static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct ipt_entry *de;
unsigned int origsize;
int h;
struct xt_entry_match *ematch;
origsize = *size;
de = *dstptr;
memcpy(de, e, sizeof(struct ipt_entry));
memcpy(&de->counters, &e->counters, sizeof(e->counters));
*dstptr += sizeof(struct ipt_entry);
*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
xt_ematch_foreach(ematch, e)
xt_compat_match_from_user(ematch, dstptr, size);
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ipt_get_target(e);
xt_compat_target_from_user(t, dstptr, size);
de->next_offset = e->next_offset - (origsize - *size);
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)de - base < newinfo->hook_entry[h])
newinfo->hook_entry[h] -= origsize - *size;
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
}
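/* Convert a 32-bit (compat) ruleset into the native layout in a newly
 * allocated xt_table_info and validate it with translate_table().
 */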
static int
translate_compat_table(struct net *net,
struct xt_table_info **pinfo,
void **pentry0,
const struct compat_ipt_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_ipt_entry *iter0;
struct ipt_replace repl;
unsigned int size;
int ret;
info = *pinfo;
entry0 = *pentry0;
size = compatr->size;
info->number = compatr->num_entries;
j = 0;
xt_compat_lock(AF_INET);
ret = xt_compat_init_offsets(AF_INET, compatr->num_entries);
if (ret)
goto out_unlock;
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}
ret = -EINVAL;
if (j != compatr->num_entries)
goto out_unlock;
ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;
memset(newinfo->entries, 0, size);
newinfo->number = compatr->num_entries;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
newinfo->hook_entry[i] = compatr->hook_entry[i];
newinfo->underflow[i] = compatr->underflow[i];
}
entry1 = newinfo->entries;
pos = entry1;
size = compatr->size;
xt_entry_foreach(iter0, entry0, compatr->size)
compat_copy_entry_from_user(iter0, &pos, &size,
newinfo, entry1);
/* all module references in entry0 are now gone.
* entry1/newinfo contains a 64bit ruleset that looks exactly as
* generated by 64bit userspace.
*
* Call standard translate_table() to validate all hook_entrys,
* underflows, check for loops, etc.
*/
xt_compat_flush_offsets(AF_INET);
xt_compat_unlock(AF_INET);
memcpy(&repl, compatr, sizeof(*compatr));
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
repl.hook_entry[i] = newinfo->hook_entry[i];
repl.underflow[i] = newinfo->underflow[i];
}
repl.num_counters = 0;
repl.counters = NULL;
repl.size = newinfo->size;
ret = translate_table(net, newinfo, entry1, &repl);
if (ret)
goto free_newinfo;
*pinfo = newinfo;
*pentry0 = entry1;
xt_free_table_info(info);
return 0;
free_newinfo:
xt_free_table_info(newinfo);
return ret;
out_unlock:
xt_compat_flush_offsets(AF_INET);
xt_compat_unlock(AF_INET);
xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
}
static int
compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
int ret;
struct compat_ipt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
struct ipt_entry *iter;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
loc_cpu_entry = newinfo->entries;
if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
tmp.size) != 0) {
ret = -EFAULT;
goto free_newinfo;
}
ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, compat_ptr(tmp.counters));
if (ret)
goto free_newinfo_untrans;
return 0;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
struct compat_ipt_get_entries {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t size;
struct compat_ipt_entry entrytable[];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
unsigned int i = 0;
struct ipt_entry *iter;
counters = alloc_counters(table);
if (IS_ERR(counters))
return PTR_ERR(counters);
pos = userptr;
size = total_size;
xt_entry_foreach(iter, private->entries, total_size) {
ret = compat_copy_entry_to_user(iter, &pos,
&size, counters, i++);
if (ret != 0)
break;
}
vfree(counters);
return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
int *len)
{
int ret;
struct compat_ipt_get_entries get;
struct xt_table *t;
if (*len < sizeof(get))
return -EINVAL;
if (copy_from_user(&get, uptr, sizeof(get)) != 0)
return -EFAULT;
if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
return -EINVAL;
get.name[sizeof(get.name) - 1] = '\0';
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
struct xt_table_info info;
ret = compat_table_info(private, &info);
if (!ret && get.size == info.size)
ret = compat_copy_entries_to_user(private->size,
t, uptr->entrytable);
else if (!ret)
ret = -EAGAIN;
xt_compat_flush_offsets(AF_INET);
module_put(t->me);
xt_table_unlock(t);
} else
ret = PTR_ERR(t);
xt_compat_unlock(AF_INET);
return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len)
{
int ret;
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case IPT_SO_SET_REPLACE:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
ret = compat_do_replace(sock_net(sk), arg, len);
else
#endif
ret = do_replace(sock_net(sk), arg, len);
break;
case IPT_SO_SET_ADD_COUNTERS:
ret = do_add_counters(sock_net(sk), arg, len);
break;
default:
ret = -EINVAL;
}
return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
int ret;
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case IPT_SO_GET_INFO:
ret = get_info(sock_net(sk), user, len);
break;
case IPT_SO_GET_ENTRIES:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
ret = compat_get_entries(sock_net(sk), user, len);
else
#endif
ret = get_entries(sock_net(sk), user, len);
break;
case IPT_SO_GET_REVISION_MATCH:
case IPT_SO_GET_REVISION_TARGET: {
struct xt_get_revision rev;
int target;
if (*len != sizeof(rev)) {
ret = -EINVAL;
break;
}
if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
ret = -EFAULT;
break;
}
rev.name[sizeof(rev.name)-1] = 0;
if (cmd == IPT_SO_GET_REVISION_TARGET)
target = 1;
else
target = 0;
try_then_request_module(xt_find_revision(AF_INET, rev.name,
rev.revision,
target, &ret),
"ipt_%s", rev.name);
break;
}
default:
ret = -EINVAL;
}
return ret;
}
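/* Common table teardown: detach the table from the xt core, run
 * cleanup_entry() on every rule to release match/target module references,
 * then free the table info.
 */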
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
struct module *table_owner = table->me;
struct ipt_entry *iter;
private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries;
xt_entry_foreach(iter, loc_cpu_entry, private->size)
cleanup_entry(iter, net);
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
}
int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
const struct nf_hook_ops *template_ops)
{
struct nf_hook_ops *ops;
unsigned int num_ops;
int ret, i;
struct xt_table_info *newinfo;
struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table *new_table;
newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
return -ENOMEM;
loc_cpu_entry = newinfo->entries;
memcpy(loc_cpu_entry, repl->entries, repl->size);
ret = translate_table(net, newinfo, loc_cpu_entry, repl);
if (ret != 0) {
xt_free_table_info(newinfo);
return ret;
}
new_table = xt_register_table(net, table, &bootstrap, newinfo);
if (IS_ERR(new_table)) {
struct ipt_entry *iter;
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
xt_free_table_info(newinfo);
return PTR_ERR(new_table);
}
/* No template? No need to do anything. This is used by the 'nat' table, which
 * registers with the nat core instead of the netfilter core.
*/
if (!template_ops)
return 0;
num_ops = hweight32(table->valid_hooks);
if (num_ops == 0) {
ret = -EINVAL;
goto out_free;
}
ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
}
for (i = 0; i < num_ops; i++)
ops[i].priv = new_table;
new_table->ops = ops;
ret = nf_register_net_hooks(net, ops, num_ops);
if (ret != 0)
goto out_free;
return ret;
out_free:
__ipt_unregister_table(net, new_table);
return ret;
}
void ipt_unregister_table_pre_exit(struct net *net, const char *name)
{
struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);
if (table)
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
}
void ipt_unregister_table_exit(struct net *net, const char *name)
{
struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);
if (table)
__ipt_unregister_table(net, table);
}
static struct xt_target ipt_builtin_tg[] __read_mostly = {
{
.name = XT_STANDARD_TARGET,
.targetsize = sizeof(int),
.family = NFPROTO_IPV4,
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
.compatsize = sizeof(compat_int_t),
.compat_from_user = compat_standard_from_user,
.compat_to_user = compat_standard_to_user,
#endif
},
{
.name = XT_ERROR_TARGET,
.target = ipt_error,
.targetsize = XT_FUNCTION_MAXNAMELEN,
.family = NFPROTO_IPV4,
},
};
static struct nf_sockopt_ops ipt_sockopts = {
.pf = PF_INET,
.set_optmin = IPT_BASE_CTL,
.set_optmax = IPT_SO_SET_MAX+1,
.set = do_ipt_set_ctl,
.get_optmin = IPT_BASE_CTL,
.get_optmax = IPT_SO_GET_MAX+1,
.get = do_ipt_get_ctl,
.owner = THIS_MODULE,
};
static int __net_init ip_tables_net_init(struct net *net)
{
return xt_proto_init(net, NFPROTO_IPV4);
}
static void __net_exit ip_tables_net_exit(struct net *net)
{
xt_proto_fini(net, NFPROTO_IPV4);
}
static struct pernet_operations ip_tables_net_ops = {
.init = ip_tables_net_init,
.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
int ret;
ret = register_pernet_subsys(&ip_tables_net_ops);
if (ret < 0)
goto err1;
/* No one else will be downing sem now, so we won't sleep */
ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
if (ret < 0)
goto err2;
/* Register setsockopt */
ret = nf_register_sockopt(&ipt_sockopts);
if (ret < 0)
goto err4;
return 0;
err4:
xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
unregister_pernet_subsys(&ip_tables_net_ops);
err1:
return ret;
}
static void __exit ip_tables_fini(void)
{
nf_unregister_sockopt(&ipt_sockopts);
xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
unregister_pernet_subsys(&ip_tables_net_ops);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
EXPORT_SYMBOL(ipt_unregister_table_exit);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);
| linux-master | net/ipv4/netfilter/ip_tables.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Filtering ARP tables module.
*
* Copyright (C) 2002 David S. Miller ([email protected])
*
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <[email protected]>");
MODULE_DESCRIPTION("arptables filter table");
#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
(1 << NF_ARP_FORWARD))
static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_ARP,
.priority = NF_IP_PRI_FILTER,
};
static struct nf_hook_ops *arpfilter_ops __read_mostly;
static int arptable_filter_table_init(struct net *net)
{
struct arpt_replace *repl;
int err;
repl = arpt_alloc_initial_table(&packet_filter);
if (repl == NULL)
return -ENOMEM;
err = arpt_register_table(net, &packet_filter, repl, arpfilter_ops);
kfree(repl);
return err;
}
static void __net_exit arptable_filter_net_pre_exit(struct net *net)
{
arpt_unregister_table_pre_exit(net, "filter");
}
static void __net_exit arptable_filter_net_exit(struct net *net)
{
arpt_unregister_table(net, "filter");
}
static struct pernet_operations arptable_filter_net_ops = {
.exit = arptable_filter_net_exit,
.pre_exit = arptable_filter_net_pre_exit,
};
static int __init arptable_filter_init(void)
{
int ret = xt_register_template(&packet_filter,
arptable_filter_table_init);
if (ret < 0)
return ret;
arpfilter_ops = xt_hook_ops_alloc(&packet_filter, arpt_do_table);
if (IS_ERR(arpfilter_ops)) {
xt_unregister_template(&packet_filter);
return PTR_ERR(arpfilter_ops);
}
ret = register_pernet_subsys(&arptable_filter_net_ops);
if (ret < 0) {
xt_unregister_template(&packet_filter);
kfree(arpfilter_ops);
return ret;
}
return ret;
}
static void __exit arptable_filter_fini(void)
{
unregister_pernet_subsys(&arptable_filter_net_ops);
xt_unregister_template(&packet_filter);
kfree(arpfilter_ops);
}
module_init(arptable_filter_init);
module_exit(arptable_filter_fini);
| linux-master | net/ipv4/netfilter/arptable_filter.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_fib.h>
#include <net/ip_fib.h>
#include <net/route.h>
/* don't try to find route from mcast/bcast/zeronet */
static __be32 get_saddr(__be32 addr)
{
if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
ipv4_is_zeronet(addr))
return 0;
return addr;
}
#define DSCP_BITS 0xfc
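/* addrtype variant of the fib expression: classify the packet's source or
 * destination address via inet_dev_addr_type(), optionally scoped to the
 * input/output device, and write the result to the destination register.
 */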
void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_fib *priv = nft_expr_priv(expr);
int noff = skb_network_offset(pkt->skb);
u32 *dst = &regs->data[priv->dreg];
const struct net_device *dev = NULL;
struct iphdr *iph, _iph;
__be32 addr;
if (priv->flags & NFTA_FIB_F_IIF)
dev = nft_in(pkt);
else if (priv->flags & NFTA_FIB_F_OIF)
dev = nft_out(pkt);
iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
if (!iph) {
regs->verdict.code = NFT_BREAK;
return;
}
if (priv->flags & NFTA_FIB_F_DADDR)
addr = iph->daddr;
else
addr = iph->saddr;
*dst = inet_dev_addr_type(nft_net(pkt), dev, addr);
}
EXPORT_SYMBOL_GPL(nft_fib4_eval_type);
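/* oif/oifname variant of the fib expression: build a flowi4 keyed on the
 * packet's daddr or saddr (depending on NFTA_FIB_F_DADDR), perform a FIB
 * lookup with FIB_LOOKUP_IGNORE_LINKSTATE and store the resulting device
 * in the destination register via nft_fib_store_result().
 */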
void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_fib *priv = nft_expr_priv(expr);
int noff = skb_network_offset(pkt->skb);
u32 *dest = &regs->data[priv->dreg];
struct iphdr *iph, _iph;
struct fib_result res;
struct flowi4 fl4 = {
.flowi4_scope = RT_SCOPE_UNIVERSE,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_uid = sock_net_uid(nft_net(pkt), NULL),
};
const struct net_device *oif;
const struct net_device *found;
/*
* Do not set flowi4_oif, it restricts results (for example, asking
* for oif 3 will get RTN_UNICAST result even if the daddr exists
* on another interface).
*
* Search results for the desired outinterface instead.
*/
if (priv->flags & NFTA_FIB_F_OIF)
oif = nft_out(pkt);
else if (priv->flags & NFTA_FIB_F_IIF)
oif = nft_in(pkt);
else
oif = NULL;
if (priv->flags & NFTA_FIB_F_IIF)
fl4.flowi4_l3mdev = l3mdev_master_ifindex_rcu(oif);
if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
nft_fib_store_result(dest, priv, nft_in(pkt));
return;
}
iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
if (!iph) {
regs->verdict.code = NFT_BREAK;
return;
}
if (ipv4_is_zeronet(iph->saddr)) {
if (ipv4_is_lbcast(iph->daddr) ||
ipv4_is_local_multicast(iph->daddr)) {
nft_fib_store_result(dest, priv, pkt->skb->dev);
return;
}
}
if (priv->flags & NFTA_FIB_F_MARK)
fl4.flowi4_mark = pkt->skb->mark;
fl4.flowi4_tos = iph->tos & DSCP_BITS;
if (priv->flags & NFTA_FIB_F_DADDR) {
fl4.daddr = iph->daddr;
fl4.saddr = get_saddr(iph->saddr);
} else {
if (nft_hook(pkt) == NF_INET_FORWARD &&
priv->flags & NFTA_FIB_F_IIF)
fl4.flowi4_iif = nft_out(pkt)->ifindex;
fl4.daddr = iph->saddr;
fl4.saddr = get_saddr(iph->daddr);
}
*dest = 0;
if (fib_lookup(nft_net(pkt), &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE))
return;
switch (res.type) {
case RTN_UNICAST:
break;
case RTN_LOCAL: /* Should not see RTN_LOCAL here */
return;
default:
break;
}
if (!oif) {
found = FIB_RES_DEV(res);
} else {
if (!fib_info_nh_uses_dev(res.fi, oif))
return;
found = oif;
}
nft_fib_store_result(dest, priv, found);
}
EXPORT_SYMBOL_GPL(nft_fib4_eval);
static struct nft_expr_type nft_fib4_type;
static const struct nft_expr_ops nft_fib4_type_ops = {
.type = &nft_fib4_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_fib)),
.eval = nft_fib4_eval_type,
.init = nft_fib_init,
.dump = nft_fib_dump,
.validate = nft_fib_validate,
.reduce = nft_fib_reduce,
};
static const struct nft_expr_ops nft_fib4_ops = {
.type = &nft_fib4_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_fib)),
.eval = nft_fib4_eval,
.init = nft_fib_init,
.dump = nft_fib_dump,
.validate = nft_fib_validate,
.reduce = nft_fib_reduce,
};
static const struct nft_expr_ops *
nft_fib4_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
enum nft_fib_result result;
if (!tb[NFTA_FIB_RESULT])
return ERR_PTR(-EINVAL);
result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT]));
switch (result) {
case NFT_FIB_RESULT_OIF:
return &nft_fib4_ops;
case NFT_FIB_RESULT_OIFNAME:
return &nft_fib4_ops;
case NFT_FIB_RESULT_ADDRTYPE:
return &nft_fib4_type_ops;
default:
return ERR_PTR(-EOPNOTSUPP);
}
}
static struct nft_expr_type nft_fib4_type __read_mostly = {
.name = "fib",
.select_ops = nft_fib4_select_ops,
.policy = nft_fib_policy,
.maxattr = NFTA_FIB_MAX,
.family = NFPROTO_IPV4,
.owner = THIS_MODULE,
};
static int __init nft_fib4_module_init(void)
{
return nft_register_expr(&nft_fib4_type);
}
static void __exit nft_fib4_module_exit(void)
{
nft_unregister_expr(&nft_fib4_type);
}
module_init(nft_fib4_module_init);
module_exit(nft_fib4_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(2, "fib");
MODULE_DESCRIPTION("nftables fib / ip route lookup support");
| linux-master | net/ipv4/netfilter/nft_fib_ipv4.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <[email protected]>
* (C) 2011 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/netfilter/nf_nat.h>
struct iptable_nat_pernet {
struct nf_hook_ops *nf_nat_ops;
};
static unsigned int iptable_nat_net_id __read_mostly;
static const struct xt_table nf_nat_ipv4_table = {
.name = "nat",
.valid_hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
};
static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
{
.hook = ipt_do_table,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_NAT_DST,
},
{
.hook = ipt_do_table,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SRC,
},
{
.hook = ipt_do_table,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST,
},
{
.hook = ipt_do_table,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
},
};
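/* The "nat" table is not hooked up directly: each entry of nf_nat_ipv4_ops
 * is registered with the nat core via nf_nat_ipv4_register_fn(), carrying
 * the freshly registered table as private data, so ipt_do_table() is run
 * on the nat ruleset from the nat core's own hooks.
 */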
static int ipt_nat_register_lookups(struct net *net)
{
struct iptable_nat_pernet *xt_nat_net;
struct nf_hook_ops *ops;
struct xt_table *table;
int i, ret;
xt_nat_net = net_generic(net, iptable_nat_net_id);
table = xt_find_table(net, NFPROTO_IPV4, "nat");
if (WARN_ON_ONCE(!table))
return -ENOENT;
ops = kmemdup(nf_nat_ipv4_ops, sizeof(nf_nat_ipv4_ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(nf_nat_ipv4_ops); i++) {
ops[i].priv = table;
ret = nf_nat_ipv4_register_fn(net, &ops[i]);
if (ret) {
while (i)
nf_nat_ipv4_unregister_fn(net, &ops[--i]);
kfree(ops);
return ret;
}
}
xt_nat_net->nf_nat_ops = ops;
return 0;
}
static void ipt_nat_unregister_lookups(struct net *net)
{
struct iptable_nat_pernet *xt_nat_net = net_generic(net, iptable_nat_net_id);
struct nf_hook_ops *ops = xt_nat_net->nf_nat_ops;
int i;
if (!ops)
return;
for (i = 0; i < ARRAY_SIZE(nf_nat_ipv4_ops); i++)
nf_nat_ipv4_unregister_fn(net, &ops[i]);
kfree(ops);
}
static int iptable_nat_table_init(struct net *net)
{
struct ipt_replace *repl;
int ret;
repl = ipt_alloc_initial_table(&nf_nat_ipv4_table);
if (repl == NULL)
return -ENOMEM;
ret = ipt_register_table(net, &nf_nat_ipv4_table, repl, NULL);
if (ret < 0) {
kfree(repl);
return ret;
}
ret = ipt_nat_register_lookups(net);
if (ret < 0)
ipt_unregister_table_exit(net, "nat");
kfree(repl);
return ret;
}
static void __net_exit iptable_nat_net_pre_exit(struct net *net)
{
ipt_nat_unregister_lookups(net);
}
static void __net_exit iptable_nat_net_exit(struct net *net)
{
ipt_unregister_table_exit(net, "nat");
}
static struct pernet_operations iptable_nat_net_ops = {
.pre_exit = iptable_nat_net_pre_exit,
.exit = iptable_nat_net_exit,
.id = &iptable_nat_net_id,
.size = sizeof(struct iptable_nat_pernet),
};
static int __init iptable_nat_init(void)
{
int ret = xt_register_template(&nf_nat_ipv4_table,
iptable_nat_table_init);
if (ret < 0)
return ret;
ret = register_pernet_subsys(&iptable_nat_net_ops);
if (ret < 0) {
xt_unregister_template(&nf_nat_ipv4_table);
return ret;
}
return ret;
}
static void __exit iptable_nat_exit(void)
{
unregister_pernet_subsys(&iptable_nat_net_ops);
xt_unregister_template(&nf_nat_ipv4_table);
}
module_init(iptable_nat_init);
module_exit(iptable_nat_exit);
MODULE_LICENSE("GPL");
| linux-master | net/ipv4/netfilter/iptable_nat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2008 BalaBit IT Ltd.
* Author: Krisztian Kovacs
*/
#include <net/netfilter/nf_tproxy.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/inet_sock.h>
#include <linux/ip.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <linux/inetdevice.h>
struct sock *
nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
__be32 laddr, __be16 lport, struct sock *sk)
{
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr _hdr, *hp;
hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
if (hp == NULL) {
inet_twsk_put(inet_twsk(sk));
return NULL;
}
if (hp->syn && !hp->rst && !hp->ack && !hp->fin) {
/* SYN to a TIME_WAIT socket, we'd rather redirect it
* to a listener socket if there's one */
struct sock *sk2;
sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
iph->saddr, laddr ? laddr : iph->daddr,
hp->source, lport ? lport : hp->dest,
skb->dev, NF_TPROXY_LOOKUP_LISTENER);
if (sk2) {
nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
}
}
return sk;
}
EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait4);
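/* Pick the local address used for redirection: the user-supplied address if
 * given, otherwise the first primary address of the interface the packet
 * arrived on, falling back to the original destination address.
 */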
__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
{
const struct in_ifaddr *ifa;
struct in_device *indev;
__be32 laddr;
if (user_laddr)
return user_laddr;
laddr = 0;
indev = __in_dev_get_rcu(skb->dev);
in_dev_for_each_ifa_rcu(ifa, indev) {
if (ifa->ifa_flags & IFA_F_SECONDARY)
continue;
laddr = ifa->ifa_local;
break;
}
return laddr ? laddr : daddr;
}
EXPORT_SYMBOL_GPL(nf_tproxy_laddr4);
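/* Transparent-proxy socket lookup for TCP and UDP: depending on lookup_type
 * search either for a listener or for an established socket matching the
 * given 4-tuple on the incoming interface.
 */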
struct sock *
nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
const u8 protocol,
const __be32 saddr, const __be32 daddr,
const __be16 sport, const __be16 dport,
const struct net_device *in,
const enum nf_tproxy_lookup_t lookup_type)
{
struct inet_hashinfo *hinfo = net->ipv4.tcp_death_row.hashinfo;
struct sock *sk;
switch (protocol) {
case IPPROTO_TCP: {
struct tcphdr _hdr, *hp;
hp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(struct tcphdr), &_hdr);
if (hp == NULL)
return NULL;
switch (lookup_type) {
case NF_TPROXY_LOOKUP_LISTENER:
sk = inet_lookup_listener(net, hinfo, skb,
ip_hdrlen(skb) + __tcp_hdrlen(hp),
saddr, sport, daddr, dport,
in->ifindex, 0);
if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
/* NOTE: we return listeners even if bound to
* 0.0.0.0, those are filtered out in
* xt_socket, since xt_TPROXY needs 0 bound
* listeners too
*/
break;
case NF_TPROXY_LOOKUP_ESTABLISHED:
sk = inet_lookup_established(net, hinfo, saddr, sport,
daddr, dport, in->ifindex);
break;
default:
BUG();
}
break;
}
case IPPROTO_UDP:
sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
in->ifindex);
if (sk) {
int connected = (sk->sk_state == TCP_ESTABLISHED);
int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
/* NOTE: we return listeners even if bound to
* 0.0.0.0, those are filtered out in
* xt_socket, since xt_TPROXY needs 0 bound
* listeners too
*/
if ((lookup_type == NF_TPROXY_LOOKUP_ESTABLISHED &&
(!connected || wildcard)) ||
(lookup_type == NF_TPROXY_LOOKUP_LISTENER && connected)) {
sock_put(sk);
sk = NULL;
}
}
break;
default:
WARN_ON(1);
sk = NULL;
}
pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
return sk;
}
EXPORT_SYMBOL_GPL(nf_tproxy_get_sock_v4);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Balazs Scheidler, Krisztian Kovacs");
MODULE_DESCRIPTION("Netfilter IPv4 transparent proxy support");
| linux-master | net/ipv4/netfilter/nf_tproxy_ipv4.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x.
*
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2004 Netfilter Core Team <[email protected]>
*/
#include <linux/module.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/route.h>
#include <linux/ip.h>
#include <net/ip.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("iptables mangle table");
#define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
(1 << NF_INET_LOCAL_IN) | \
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT) | \
(1 << NF_INET_POST_ROUTING))
static const struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
.priority = NF_IP_PRI_MANGLE,
};
static unsigned int
ipt_mangle_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
{
unsigned int ret;
const struct iphdr *iph;
u_int8_t tos;
__be32 saddr, daddr;
u_int32_t mark;
int err;
/* Save things which could affect route */
mark = skb->mark;
iph = ip_hdr(skb);
saddr = iph->saddr;
daddr = iph->daddr;
tos = iph->tos;
ret = ipt_do_table(priv, skb, state);
/* Reroute for ANY change. */
if (ret != NF_DROP && ret != NF_STOLEN) {
iph = ip_hdr(skb);
if (iph->saddr != saddr ||
iph->daddr != daddr ||
skb->mark != mark ||
iph->tos != tos) {
err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
if (err < 0)
ret = NF_DROP_ERR(err);
}
}
return ret;
}
/* The work comes in here from netfilter.c. */
static unsigned int
iptable_mangle_hook(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (state->hook == NF_INET_LOCAL_OUT)
return ipt_mangle_out(priv, skb, state);
return ipt_do_table(priv, skb, state);
}
static struct nf_hook_ops *mangle_ops __read_mostly;
static int iptable_mangle_table_init(struct net *net)
{
struct ipt_replace *repl;
int ret;
repl = ipt_alloc_initial_table(&packet_mangler);
if (repl == NULL)
return -ENOMEM;
ret = ipt_register_table(net, &packet_mangler, repl, mangle_ops);
kfree(repl);
return ret;
}
static void __net_exit iptable_mangle_net_pre_exit(struct net *net)
{
ipt_unregister_table_pre_exit(net, "mangle");
}
static void __net_exit iptable_mangle_net_exit(struct net *net)
{
ipt_unregister_table_exit(net, "mangle");
}
static struct pernet_operations iptable_mangle_net_ops = {
.pre_exit = iptable_mangle_net_pre_exit,
.exit = iptable_mangle_net_exit,
};
static int __init iptable_mangle_init(void)
{
int ret = xt_register_template(&packet_mangler,
iptable_mangle_table_init);
if (ret < 0)
return ret;
mangle_ops = xt_hook_ops_alloc(&packet_mangler, iptable_mangle_hook);
if (IS_ERR(mangle_ops)) {
xt_unregister_template(&packet_mangler);
ret = PTR_ERR(mangle_ops);
return ret;
}
ret = register_pernet_subsys(&iptable_mangle_net_ops);
if (ret < 0) {
xt_unregister_template(&packet_mangler);
kfree(mangle_ops);
return ret;
}
return ret;
}
static void __exit iptable_mangle_fini(void)
{
unregister_pernet_subsys(&iptable_mangle_net_ops);
xt_unregister_template(&packet_mangler);
kfree(mangle_ops);
}
module_init(iptable_mangle_init);
module_exit(iptable_mangle_fini);
| linux-master | net/ipv4/netfilter/iptable_mangle.c |
// SPDX-License-Identifier: GPL-2.0-only
/* module that allows mangling of the arp payload */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_arp/arpt_mangle.h>
#include <net/sock.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <[email protected]>");
MODULE_DESCRIPTION("arptables arp payload mangle target");
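/* Mangle the ARP payload in place: depending on the configured flags,
 * overwrite the source/target hardware and protocol addresses, after
 * checking each field length against the ARP header and the skb tail.
 */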
static unsigned int
target(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct arpt_mangle *mangle = par->targinfo;
const struct arphdr *arp;
unsigned char *arpptr;
int pln, hln;
if (skb_ensure_writable(skb, skb->len))
return NF_DROP;
arp = arp_hdr(skb);
arpptr = skb_network_header(skb) + sizeof(*arp);
pln = arp->ar_pln;
hln = arp->ar_hln;
/* We assume that pln and hln were checked in the match */
if (mangle->flags & ARPT_MANGLE_SDEV) {
if (ARPT_DEV_ADDR_LEN_MAX < hln ||
(arpptr + hln > skb_tail_pointer(skb)))
return NF_DROP;
memcpy(arpptr, mangle->src_devaddr, hln);
}
arpptr += hln;
if (mangle->flags & ARPT_MANGLE_SIP) {
if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
(arpptr + pln > skb_tail_pointer(skb)))
return NF_DROP;
memcpy(arpptr, &mangle->u_s.src_ip, pln);
}
arpptr += pln;
if (mangle->flags & ARPT_MANGLE_TDEV) {
if (ARPT_DEV_ADDR_LEN_MAX < hln ||
(arpptr + hln > skb_tail_pointer(skb)))
return NF_DROP;
memcpy(arpptr, mangle->tgt_devaddr, hln);
}
arpptr += hln;
if (mangle->flags & ARPT_MANGLE_TIP) {
if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
(arpptr + pln > skb_tail_pointer(skb)))
return NF_DROP;
memcpy(arpptr, &mangle->u_t.tgt_ip, pln);
}
return mangle->target;
}
static int checkentry(const struct xt_tgchk_param *par)
{
const struct arpt_mangle *mangle = par->targinfo;
if (mangle->flags & ~ARPT_MANGLE_MASK ||
!(mangle->flags & ARPT_MANGLE_MASK))
return -EINVAL;
if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
mangle->target != XT_CONTINUE)
return -EINVAL;
return 0;
}
static struct xt_target arpt_mangle_reg __read_mostly = {
.name = "mangle",
.family = NFPROTO_ARP,
.target = target,
.targetsize = sizeof(struct arpt_mangle),
.checkentry = checkentry,
.me = THIS_MODULE,
};
static int __init arpt_mangle_init(void)
{
return xt_register_target(&arpt_mangle_reg);
}
static void __exit arpt_mangle_fini(void)
{
xt_unregister_target(&arpt_mangle_reg);
}
module_init(arpt_mangle_init);
module_exit(arpt_mangle_fini);
| linux-master | net/ipv4/netfilter/arpt_mangle.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This is a module which is used for rejecting packets.
*/
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <net/icmp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_REJECT.h>
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include <linux/netfilter_bridge.h>
#endif
#include <net/netfilter/ipv4/nf_reject.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv4");
static unsigned int
reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_reject_info *reject = par->targinfo;
int hook = xt_hooknum(par);
switch (reject->with) {
case IPT_ICMP_NET_UNREACHABLE:
nf_send_unreach(skb, ICMP_NET_UNREACH, hook);
break;
case IPT_ICMP_HOST_UNREACHABLE:
nf_send_unreach(skb, ICMP_HOST_UNREACH, hook);
break;
case IPT_ICMP_PROT_UNREACHABLE:
nf_send_unreach(skb, ICMP_PROT_UNREACH, hook);
break;
case IPT_ICMP_PORT_UNREACHABLE:
nf_send_unreach(skb, ICMP_PORT_UNREACH, hook);
break;
case IPT_ICMP_NET_PROHIBITED:
nf_send_unreach(skb, ICMP_NET_ANO, hook);
break;
case IPT_ICMP_HOST_PROHIBITED:
nf_send_unreach(skb, ICMP_HOST_ANO, hook);
break;
case IPT_ICMP_ADMIN_PROHIBITED:
nf_send_unreach(skb, ICMP_PKT_FILTERED, hook);
break;
case IPT_TCP_RESET:
nf_send_reset(xt_net(par), par->state->sk, skb, hook);
break;
case IPT_ICMP_ECHOREPLY:
/* Doesn't happen. */
break;
}
return NF_DROP;
}
static int reject_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_reject_info *rejinfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
pr_info_ratelimited("ECHOREPLY no longer supported.\n");
return -EINVAL;
} else if (rejinfo->with == IPT_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (e->ip.proto != IPPROTO_TCP ||
(e->ip.invflags & XT_INV_PROTO)) {
pr_info_ratelimited("TCP_RESET invalid for non-tcp\n");
return -EINVAL;
}
}
return 0;
}
static struct xt_target reject_tg_reg __read_mostly = {
.name = "REJECT",
.family = NFPROTO_IPV4,
.target = reject_tg,
.targetsize = sizeof(struct ipt_reject_info),
.table = "filter",
.hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT),
.checkentry = reject_tg_check,
.me = THIS_MODULE,
};
static int __init reject_tg_init(void)
{
return xt_register_target(&reject_tg_reg);
}
static void __exit reject_tg_exit(void)
{
xt_unregister_target(&reject_tg_reg);
}
module_init(reject_tg_init);
module_exit(reject_tg_exit);
| linux-master | net/ipv4/netfilter/ipt_REJECT.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* nf_nat_pptp.c
*
* NAT support for PPTP (Point to Point Tunneling Protocol).
* PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
* GRE is defined in RFC 1701 and RFC 1702. Documentation of
* PPTP can be found in RFC 2637
*
* (C) 2000-2005 by Harald Welte <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*
* (C) 2006-2012 Patrick McHardy <[email protected]>
*
* TODO: - NAT to a unique tuple, not to TCP source port
* (needs netfilter tuple reservation)
*/
#include <linux/module.h>
#include <linux/tcp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
#define NF_NAT_PPTP_VERSION "3.0"
#define REQ_CID(req, off) (*(__be16 *)((char *)(req) + (off)))
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
MODULE_ALIAS_NF_NAT_HELPER("pptp");
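/* Called when an expected GRE connection shows up: drop the expectation that
 * was installed for the opposite direction and set up SRC/DST NAT mappings
 * derived from the PPTP control connection (the master) and the saved
 * call IDs.
 */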
static void pptp_nat_expected(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
{
struct net *net = nf_ct_net(ct);
const struct nf_conn *master = ct->master;
struct nf_conntrack_expect *other_exp;
struct nf_conntrack_tuple t = {};
const struct nf_ct_pptp_master *ct_pptp_info;
const struct nf_nat_pptp *nat_pptp_info;
struct nf_nat_range2 range;
struct nf_conn_nat *nat;
nat = nf_ct_nat_ext_add(ct);
if (WARN_ON_ONCE(!nat))
return;
nat_pptp_info = &nat->help.nat_pptp_info;
ct_pptp_info = nfct_help_data(master);
/* And here goes the grand finale of corrosion... */
if (exp->dir == IP_CT_DIR_ORIGINAL) {
pr_debug("we are PNS->PAC\n");
/* therefore, build tuple for PAC->PNS */
t.src.l3num = AF_INET;
t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
t.src.u.gre.key = ct_pptp_info->pac_call_id;
t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
t.dst.u.gre.key = ct_pptp_info->pns_call_id;
t.dst.protonum = IPPROTO_GRE;
} else {
pr_debug("we are PAC->PNS\n");
/* build tuple for PNS->PAC */
t.src.l3num = AF_INET;
t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
t.src.u.gre.key = nat_pptp_info->pns_call_id;
t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
t.dst.u.gre.key = nat_pptp_info->pac_call_id;
t.dst.protonum = IPPROTO_GRE;
}
pr_debug("trying to unexpect other dir: ");
nf_ct_dump_tuple_ip(&t);
other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
if (other_exp) {
nf_ct_unexpect_related(other_exp);
nf_ct_expect_put(other_exp);
pr_debug("success\n");
} else {
pr_debug("not found!\n");
}
/* This must be a fresh one. */
BUG_ON(ct->status & IPS_NAT_DONE_MASK);
/* Change src to where master sends to */
range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_addr = range.max_addr
= ct->master->tuplehash[!exp->dir].tuple.dst.u3;
if (exp->dir == IP_CT_DIR_ORIGINAL) {
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
range.min_proto = range.max_proto = exp->saved_proto;
}
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
/* For DST manip, map port here to where it's expected. */
range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_addr = range.max_addr
= ct->master->tuplehash[!exp->dir].tuple.src.u3;
if (exp->dir == IP_CT_DIR_REPLY) {
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
range.min_proto = range.max_proto = exp->saved_proto;
}
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
/* outbound packets == from PNS to PAC */
static int
pptp_outbound_pkt(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
struct PptpControlHeader *ctlh,
union pptp_ctrl_union *pptpReq)
{
struct nf_ct_pptp_master *ct_pptp_info;
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_nat_pptp *nat_pptp_info;
u_int16_t msg;
__be16 new_callid;
unsigned int cid_off;
if (WARN_ON_ONCE(!nat))
return NF_DROP;
nat_pptp_info = &nat->help.nat_pptp_info;
ct_pptp_info = nfct_help_data(ct);
new_callid = ct_pptp_info->pns_call_id;
switch (msg = ntohs(ctlh->messageType)) {
case PPTP_OUT_CALL_REQUEST:
cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);
/* FIXME: ideally we would want to reserve a call ID
* here. current netfilter NAT core is not able to do
* this :( For now we use TCP source port. This breaks
* multiple calls within one control session */
/* save original call ID in nat_info */
nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;
/* don't use tcph->source since we are at a DSTmanip
* hook (e.g. PREROUTING) and pkt is not mangled yet */
new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
/* save new call ID in ct info */
ct_pptp_info->pns_call_id = new_callid;
break;
case PPTP_IN_CALL_REPLY:
cid_off = offsetof(union pptp_ctrl_union, icack.callID);
break;
case PPTP_CALL_CLEAR_REQUEST:
cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
break;
default:
pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
pptp_msg_name(msg));
fallthrough;
case PPTP_SET_LINK_INFO:
/* only need to NAT in case PAC is behind NAT box */
case PPTP_START_SESSION_REQUEST:
case PPTP_START_SESSION_REPLY:
case PPTP_STOP_SESSION_REQUEST:
case PPTP_STOP_SESSION_REPLY:
case PPTP_ECHO_REQUEST:
case PPTP_ECHO_REPLY:
/* no need to alter packet */
return NF_ACCEPT;
}
/* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
* down to here */
pr_debug("altering call id from 0x%04x to 0x%04x\n",
ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));
/* mangle packet */
if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
cid_off + sizeof(struct pptp_pkt_hdr) +
sizeof(struct PptpControlHeader),
sizeof(new_callid), (char *)&new_callid,
sizeof(new_callid)))
return NF_DROP;
return NF_ACCEPT;
}
static void
pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
struct nf_conntrack_expect *expect_reply)
{
const struct nf_conn *ct = expect_orig->master;
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_ct_pptp_master *ct_pptp_info;
struct nf_nat_pptp *nat_pptp_info;
if (WARN_ON_ONCE(!nat))
return;
nat_pptp_info = &nat->help.nat_pptp_info;
ct_pptp_info = nfct_help_data(ct);
/* save original PAC call ID in nat_info */
nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;
/* alter expectation for PNS->PAC direction */
expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id;
expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id;
expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id;
expect_orig->dir = IP_CT_DIR_ORIGINAL;
/* alter expectation for PAC->PNS direction */
expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id;
expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id;
expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id;
expect_reply->dir = IP_CT_DIR_REPLY;
}
/* inbound packets == from PAC to PNS */
static int
pptp_inbound_pkt(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
struct PptpControlHeader *ctlh,
union pptp_ctrl_union *pptpReq)
{
const struct nf_nat_pptp *nat_pptp_info;
struct nf_conn_nat *nat = nfct_nat(ct);
u_int16_t msg;
__be16 new_pcid;
unsigned int pcid_off;
if (WARN_ON_ONCE(!nat))
return NF_DROP;
nat_pptp_info = &nat->help.nat_pptp_info;
new_pcid = nat_pptp_info->pns_call_id;
switch (msg = ntohs(ctlh->messageType)) {
case PPTP_OUT_CALL_REPLY:
pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID);
break;
case PPTP_IN_CALL_CONNECT:
pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID);
break;
case PPTP_IN_CALL_REQUEST:
/* only need to nat in case PAC is behind NAT box */
return NF_ACCEPT;
case PPTP_WAN_ERROR_NOTIFY:
pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID);
break;
case PPTP_CALL_DISCONNECT_NOTIFY:
pcid_off = offsetof(union pptp_ctrl_union, disc.callID);
break;
case PPTP_SET_LINK_INFO:
pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
break;
default:
pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg));
fallthrough;
case PPTP_START_SESSION_REQUEST:
case PPTP_START_SESSION_REPLY:
case PPTP_STOP_SESSION_REQUEST:
case PPTP_STOP_SESSION_REPLY:
case PPTP_ECHO_REQUEST:
case PPTP_ECHO_REPLY:
/* no need to alter packet */
return NF_ACCEPT;
}
/* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST,
* WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */
/* mangle packet */
pr_debug("altering peer call id from 0x%04x to 0x%04x\n",
ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
pcid_off + sizeof(struct pptp_pkt_hdr) +
sizeof(struct PptpControlHeader),
sizeof(new_pcid), (char *)&new_pcid,
sizeof(new_pcid)))
return NF_DROP;
return NF_ACCEPT;
}
static const struct nf_nat_pptp_hook pptp_hooks = {
.outbound = pptp_outbound_pkt,
.inbound = pptp_inbound_pkt,
.exp_gre = pptp_exp_gre,
.expectfn = pptp_nat_expected,
};
static int __init nf_nat_helper_pptp_init(void)
{
WARN_ON(nf_nat_pptp_hook != NULL);
RCU_INIT_POINTER(nf_nat_pptp_hook, &pptp_hooks);
return 0;
}
static void __exit nf_nat_helper_pptp_fini(void)
{
RCU_INIT_POINTER(nf_nat_pptp_hook, NULL);
synchronize_rcu();
}
module_init(nf_nat_helper_pptp_init);
module_exit(nf_nat_helper_pptp_fini);
| linux-master | net/ipv4/netfilter/nf_nat_pptp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (C) 2007 by Sebastian Claßen <[email protected]>
* (C) 2007-2010 by Jan Engelhardt <[email protected]>
*
* Extracted from xt_TEE.c
*/
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/route.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/netfilter/ipv4/nf_dup_ipv4.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
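/* Look up a route to the configured gateway (optionally restricted to oif)
 * and attach it to the duplicated skb; returns false if no route is found.
 */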
static bool nf_dup_ipv4_route(struct net *net, struct sk_buff *skb,
const struct in_addr *gw, int oif)
{
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
struct flowi4 fl4;
memset(&fl4, 0, sizeof(fl4));
if (oif != -1)
fl4.flowi4_oif = oif;
fl4.daddr = gw->s_addr;
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return false;
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
skb->dev = rt->dst.dev;
skb->protocol = htons(ETH_P_IP);
return true;
}
void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
const struct in_addr *gw, int oif)
{
struct iphdr *iph;
if (this_cpu_read(nf_skb_duplicated))
return;
/*
* Copy the skb, and route the copy. Will later return %XT_CONTINUE for
* the original skb, which should continue on its way as if nothing has
* happened. The copy should be independently delivered to the gateway.
*/
skb = pskb_copy(skb, GFP_ATOMIC);
if (skb == NULL)
return;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Avoid counting cloned packets towards the original connection. */
nf_reset_ct(skb);
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
#endif
/*
* If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential
* loops between two hosts.
*
* Set %IP_DF so that the original source is notified of a potentially
* decreased MTU on the clone route. IPv6 does this too.
*
* IP header checksum will be recalculated at ip_local_out.
*/
iph = ip_hdr(skb);
iph->frag_off |= htons(IP_DF);
if (hooknum == NF_INET_PRE_ROUTING ||
hooknum == NF_INET_LOCAL_IN)
--iph->ttl;
if (nf_dup_ipv4_route(net, skb, gw, oif)) {
__this_cpu_write(nf_skb_duplicated, true);
ip_local_out(net, skb->sk, skb);
__this_cpu_write(nf_skb_duplicated, false);
} else {
kfree_skb(skb);
}
}
EXPORT_SYMBOL_GPL(nf_dup_ipv4);
MODULE_AUTHOR("Sebastian Claßen <[email protected]>");
MODULE_AUTHOR("Jan Engelhardt <[email protected]>");
MODULE_DESCRIPTION("nf_dup_ipv4: Duplicate IPv4 packet");
MODULE_LICENSE("GPL");
| linux-master | net/ipv4/netfilter/nf_dup_ipv4.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2009 Patrick McHardy <[email protected]>
* Copyright (c) 2013 Eric Leblond <[email protected]>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/nft_reject.h>
static void nft_reject_ipv4_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nf_send_unreach(pkt->skb, priv->icmp_code, nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
nf_send_reset(nft_net(pkt), nft_sk(pkt), pkt->skb,
nft_hook(pkt));
break;
default:
break;
}
regs->verdict.code = NF_DROP;
}
static struct nft_expr_type nft_reject_ipv4_type;
static const struct nft_expr_ops nft_reject_ipv4_ops = {
.type = &nft_reject_ipv4_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_ipv4_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
.family = NFPROTO_IPV4,
.name = "reject",
.ops = &nft_reject_ipv4_ops,
.policy = nft_reject_policy,
.maxattr = NFTA_REJECT_MAX,
.owner = THIS_MODULE,
};
static int __init nft_reject_ipv4_module_init(void)
{
return nft_register_expr(&nft_reject_ipv4_type);
}
static void __exit nft_reject_ipv4_module_exit(void)
{
nft_unregister_expr(&nft_reject_ipv4_type);
}
module_init(nft_reject_ipv4_module_init);
module_exit(nft_reject_ipv4_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
MODULE_DESCRIPTION("IPv4 packet rejection for nftables");
| linux-master | net/ipv4/netfilter/nft_reject_ipv4.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 'raw' table, which is the very first table hooked in at PRE_ROUTING and LOCAL_OUT.
*
* Copyright (C) 2003 Jozsef Kadlecsik <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/slab.h>
#include <net/ip.h>
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
static bool raw_before_defrag __read_mostly;
MODULE_PARM_DESC(raw_before_defrag, "Enable raw table before defrag");
module_param(raw_before_defrag, bool, 0000);
static const struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
.priority = NF_IP_PRI_RAW,
};
static const struct xt_table packet_raw_before_defrag = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
.priority = NF_IP_PRI_RAW_BEFORE_DEFRAG,
};
static struct nf_hook_ops *rawtable_ops __read_mostly;
static int iptable_raw_table_init(struct net *net)
{
struct ipt_replace *repl;
const struct xt_table *table = &packet_raw;
int ret;
if (raw_before_defrag)
table = &packet_raw_before_defrag;
repl = ipt_alloc_initial_table(table);
if (repl == NULL)
return -ENOMEM;
ret = ipt_register_table(net, table, repl, rawtable_ops);
kfree(repl);
return ret;
}
static void __net_exit iptable_raw_net_pre_exit(struct net *net)
{
ipt_unregister_table_pre_exit(net, "raw");
}
static void __net_exit iptable_raw_net_exit(struct net *net)
{
ipt_unregister_table_exit(net, "raw");
}
static struct pernet_operations iptable_raw_net_ops = {
.pre_exit = iptable_raw_net_pre_exit,
.exit = iptable_raw_net_exit,
};
static int __init iptable_raw_init(void)
{
int ret;
const struct xt_table *table = &packet_raw;
if (raw_before_defrag) {
table = &packet_raw_before_defrag;
pr_info("Enabling raw table before defrag\n");
}
ret = xt_register_template(table,
iptable_raw_table_init);
if (ret < 0)
return ret;
rawtable_ops = xt_hook_ops_alloc(table, ipt_do_table);
if (IS_ERR(rawtable_ops)) {
xt_unregister_template(table);
return PTR_ERR(rawtable_ops);
}
ret = register_pernet_subsys(&iptable_raw_net_ops);
if (ret < 0) {
xt_unregister_template(table);
kfree(rawtable_ops);
return ret;
}
return ret;
}
static void __exit iptable_raw_fini(void)
{
unregister_pernet_subsys(&iptable_raw_net_ops);
kfree(rawtable_ops);
xt_unregister_template(&packet_raw);
}
module_init(iptable_raw_init);
module_exit(iptable_raw_fini);
MODULE_LICENSE("GPL");
| linux-master | net/ipv4/netfilter/iptable_raw.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2008 BalaBit IT Ltd.
* Author: Krisztian Kovacs
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/sock.h>
#include <net/inet_sock.h>
#include <net/netfilter/nf_socket.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
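/* For ICMP error packets, dig the quoted IP/transport headers out of the
 * payload to recover protocol, addresses and ports of the original
 * connection, so the socket lookup can be done on those instead.
 */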
static int
extract_icmp4_fields(const struct sk_buff *skb, u8 *protocol,
__be32 *raddr, __be32 *laddr,
__be16 *rport, __be16 *lport)
{
unsigned int outside_hdrlen = ip_hdrlen(skb);
struct iphdr *inside_iph, _inside_iph;
struct icmphdr *icmph, _icmph;
__be16 *ports, _ports[2];
icmph = skb_header_pointer(skb, outside_hdrlen,
sizeof(_icmph), &_icmph);
if (icmph == NULL)
return 1;
if (!icmp_is_err(icmph->type))
return 1;
inside_iph = skb_header_pointer(skb, outside_hdrlen +
sizeof(struct icmphdr),
sizeof(_inside_iph), &_inside_iph);
if (inside_iph == NULL)
return 1;
if (inside_iph->protocol != IPPROTO_TCP &&
inside_iph->protocol != IPPROTO_UDP)
return 1;
ports = skb_header_pointer(skb, outside_hdrlen +
sizeof(struct icmphdr) +
(inside_iph->ihl << 2),
sizeof(_ports), &_ports);
if (ports == NULL)
return 1;
/* the inside IP packet is the one quoted from our side, thus
* its saddr is the local address */
*protocol = inside_iph->protocol;
*laddr = inside_iph->saddr;
*lport = ports[0];
*raddr = inside_iph->daddr;
*rport = ports[1];
return 0;
}
static struct sock *
nf_socket_get_sock_v4(struct net *net, struct sk_buff *skb, const int doff,
const u8 protocol,
const __be32 saddr, const __be32 daddr,
const __be16 sport, const __be16 dport,
const struct net_device *in)
{
switch (protocol) {
case IPPROTO_TCP:
return inet_lookup(net, net->ipv4.tcp_death_row.hashinfo,
skb, doff, saddr, sport, daddr, dport,
in->ifindex);
case IPPROTO_UDP:
return udp4_lib_lookup(net, saddr, sport, daddr, dport,
in->ifindex);
}
return NULL;
}
struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
const struct net_device *indev)
{
__be32 daddr, saddr;
__be16 dport, sport;
const struct iphdr *iph = ip_hdr(skb);
struct sk_buff *data_skb = NULL;
u8 protocol;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
enum ip_conntrack_info ctinfo;
struct nf_conn const *ct;
#endif
int doff = 0;
if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
struct tcphdr _hdr;
struct udphdr *hp;
hp = skb_header_pointer(skb, ip_hdrlen(skb),
iph->protocol == IPPROTO_UDP ?
sizeof(*hp) : sizeof(_hdr), &_hdr);
if (hp == NULL)
return NULL;
protocol = iph->protocol;
saddr = iph->saddr;
sport = hp->source;
daddr = iph->daddr;
dport = hp->dest;
data_skb = (struct sk_buff *)skb;
doff = iph->protocol == IPPROTO_TCP ?
ip_hdrlen(skb) + __tcp_hdrlen((struct tcphdr *)hp) :
ip_hdrlen(skb) + sizeof(*hp);
} else if (iph->protocol == IPPROTO_ICMP) {
if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
&sport, &dport))
return NULL;
} else {
return NULL;
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Do the lookup with the original socket address in
* case this is a reply packet of an established
* SNAT-ted connection.
*/
ct = nf_ct_get(skb, &ctinfo);
if (ct &&
((iph->protocol != IPPROTO_ICMP &&
ctinfo == IP_CT_ESTABLISHED_REPLY) ||
(iph->protocol == IPPROTO_ICMP &&
ctinfo == IP_CT_RELATED_REPLY)) &&
(ct->status & IPS_SRC_NAT_DONE)) {
daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
dport = (iph->protocol == IPPROTO_TCP) ?
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port :
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
}
#endif
return nf_socket_get_sock_v4(net, data_skb, doff, protocol, saddr,
daddr, sport, dport, indev);
}
EXPORT_SYMBOL_GPL(nf_sk_lookup_slow_v4);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Krisztian Kovacs, Balazs Scheidler");
MODULE_DESCRIPTION("Netfilter IPv4 socket lookup infrastructure");
| linux-master | net/ipv4/netfilter/nf_socket_ipv4.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* nf_nat_snmp_basic.c
*
* Basic SNMP Application Layer Gateway
*
* This IP NAT module is intended for use with SNMP network
* discovery and monitoring applications where target networks use
* conflicting private address realms.
*
* Static NAT is used to remap the networks from the view of the network
* management system at the IP layer, and this module remaps some application
* layer addresses to match.
*
* The simplest form of ALG is performed, where only tagged IP addresses
* are modified. The module does not need to be MIB aware and only scans
* messages at the ASN.1/BER level.
*
* Currently, only SNMPv1 and SNMPv2 are supported.
*
* More information on ALG and associated issues can be found in
* RFC 2962
*
* The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory
* McLean & Jochen Friedrich, stripped down for use in the kernel.
*
* Copyright (c) 2000 RP Internet (www.rpi.net.au).
*
* Author: James Morris <[email protected]>
*
* Copyright (c) 2006-2010 Patrick McHardy <[email protected]>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_snmp.h>
#include "nf_nat_snmp_basic.asn1.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <[email protected]>");
MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
MODULE_ALIAS("ip_nat_snmp_basic");
MODULE_ALIAS_NFCT_HELPER("snmp_trap");
#define SNMP_PORT 161
#define SNMP_TRAP_PORT 162
static DEFINE_SPINLOCK(snmp_lock);
struct snmp_ctx {
unsigned char *begin;
__sum16 *check;
__be32 from;
__be32 to;
};
static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
{
unsigned char s[12] = {0,};
int size;
if (offset & 1) {
memcpy(&s[1], &ctx->from, 4);
memcpy(&s[7], &ctx->to, 4);
s[0] = ~0;
s[1] = ~s[1];
s[2] = ~s[2];
s[3] = ~s[3];
s[4] = ~s[4];
s[5] = ~0;
size = 12;
} else {
memcpy(&s[0], &ctx->from, 4);
memcpy(&s[4], &ctx->to, 4);
s[0] = ~s[0];
s[1] = ~s[1];
s[2] = ~s[2];
s[3] = ~s[3];
size = 8;
}
*ctx->check = csum_fold(csum_partial(s, size,
~csum_unfold(*ctx->check)));
}
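/* Callbacks invoked by the generated ASN.1/BER decoder: snmp_version accepts
 * only version values 0 and 1 (SNMPv1/SNMPv2), snmp_helper rewrites every
 * encoded IPv4 address that matches the pre-NAT address and patches the UDP
 * checksum incrementally via fast_csum() when the checksum is non-zero.
 */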
int snmp_version(void *context, size_t hdrlen, unsigned char tag,
const void *data, size_t datalen)
{
if (datalen != 1)
return -EINVAL;
if (*(unsigned char *)data > 1)
return -ENOTSUPP;
return 1;
}
int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
const void *data, size_t datalen)
{
struct snmp_ctx *ctx = (struct snmp_ctx *)context;
__be32 *pdata;
if (datalen != 4)
return -EINVAL;
pdata = (__be32 *)data;
if (*pdata == ctx->from) {
pr_debug("%s: %pI4 to %pI4\n", __func__,
(void *)&ctx->from, (void *)&ctx->to);
if (*ctx->check)
fast_csum(ctx, (unsigned char *)data - ctx->begin);
*pdata = ctx->to;
}
return 1;
}
static int snmp_translate(struct nf_conn *ct, int dir, struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
u16 datalen = ntohs(udph->len) - sizeof(struct udphdr);
char *data = (unsigned char *)udph + sizeof(struct udphdr);
struct snmp_ctx ctx;
int ret;
if (dir == IP_CT_DIR_ORIGINAL) {
ctx.from = ct->tuplehash[dir].tuple.src.u3.ip;
ctx.to = ct->tuplehash[!dir].tuple.dst.u3.ip;
} else {
ctx.from = ct->tuplehash[!dir].tuple.src.u3.ip;
ctx.to = ct->tuplehash[dir].tuple.dst.u3.ip;
}
if (ctx.from == ctx.to)
return NF_ACCEPT;
ctx.begin = (unsigned char *)udph + sizeof(struct udphdr);
ctx.check = &udph->check;
ret = asn1_ber_decoder(&nf_nat_snmp_basic_decoder, &ctx, data, datalen);
if (ret < 0) {
nf_ct_helper_log(skb, ct, "parser failed\n");
return NF_DROP;
}
return NF_ACCEPT;
}
/* We don't actually set up expectations, just adjust internal IP
* addresses if this is being NATted
*/
static int help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
int dir = CTINFO2DIR(ctinfo);
unsigned int ret;
const struct iphdr *iph = ip_hdr(skb);
const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
/* SNMP replies and originating SNMP traps get mangled */
if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
return NF_ACCEPT;
if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;
/* No NAT? */
if (!(ct->status & IPS_NAT_MASK))
return NF_ACCEPT;
/* Make sure the packet length is ok. So far, we were only guaranteed
* to have a valid length IP header plus 8 bytes, which means we have
* enough room for a UDP header. Just verify the UDP length field so we
* can mess around with the payload.
*/
if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
nf_ct_helper_log(skb, ct, "dropping malformed packet\n");
return NF_DROP;
}
if (skb_ensure_writable(skb, skb->len)) {
nf_ct_helper_log(skb, ct, "cannot mangle packet");
return NF_DROP;
}
spin_lock_bh(&snmp_lock);
ret = snmp_translate(ct, dir, skb);
spin_unlock_bh(&snmp_lock);
return ret;
}
static const struct nf_conntrack_expect_policy snmp_exp_policy = {
.max_expected = 0,
.timeout = 180,
};
static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
.me = THIS_MODULE,
.help = help,
.expect_policy = &snmp_exp_policy,
.name = "snmp_trap",
.tuple.src.l3num = AF_INET,
.tuple.src.u.udp.port = cpu_to_be16(SNMP_TRAP_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
};
static int __init nf_nat_snmp_basic_init(void)
{
BUG_ON(nf_nat_snmp_hook != NULL);
RCU_INIT_POINTER(nf_nat_snmp_hook, help);
return nf_conntrack_helper_register(&snmp_trap_helper);
}
static void __exit nf_nat_snmp_basic_fini(void)
{
RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
synchronize_rcu();
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
module_init(nf_nat_snmp_basic_init);
module_exit(nf_nat_snmp_basic_fini);
| linux-master | net/ipv4/netfilter/nf_nat_snmp_basic_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#include <linux/module.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
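/* Basic sanity checks on the original IPv4 header before a reject packet
 * is built from it.
 */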
static int nf_reject_iphdr_validate(struct sk_buff *skb)
{
struct iphdr *iph;
u32 len;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return 0;
iph = ip_hdr(skb);
if (iph->ihl < 5 || iph->version != 4)
return 0;
len = ntohs(iph->tot_len);
if (skb->len < len)
return 0;
else if (len < (iph->ihl*4))
return 0;
if (!pskb_may_pull(skb, iph->ihl*4))
return 0;
return 1;
}
struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
const struct tcphdr *oth;
struct sk_buff *nskb;
struct iphdr *niph;
struct tcphdr _oth;
if (!nf_reject_iphdr_validate(oldskb))
return NULL;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
if (!oth)
return NULL;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return NULL;
nskb->dev = (struct net_device *)dev;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
struct iphdr *niph;
struct icmphdr *icmph;
unsigned int len;
int dataoff;
__wsum csum;
u8 proto;
if (!nf_reject_iphdr_validate(oldskb))
return NULL;
/* IP header checks: fragment. */
if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
return NULL;
/* RFC says return as much as we can without exceeding 576 bytes. */
len = min_t(unsigned int, 536, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return NULL;
if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
return NULL;
dataoff = ip_hdrlen(oldskb);
proto = ip_hdr(oldskb)->protocol;
if (!skb_csum_unnecessary(oldskb) &&
nf_reject_verify_csum(oldskb, dataoff, proto) &&
nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
return NULL;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
return NULL;
nskb->dev = (struct net_device *)dev;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
skb_reset_transport_header(nskb);
icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
icmph->type = ICMP_DEST_UNREACH;
icmph->code = code;
skb_put_data(nskb, skb_network_header(oldskb), len);
csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
icmph->checksum = csum_fold(csum);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook)
{
const struct tcphdr *oth;
/* IP header checks: fragment. */
if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
return NULL;
if (ip_hdr(oldskb)->protocol != IPPROTO_TCP)
return NULL;
oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
sizeof(struct tcphdr), _oth);
if (oth == NULL)
return NULL;
/* No RST for RST. */
if (oth->rst)
return NULL;
/* Check checksum */
if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
return NULL;
return oth;
}
EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
__u8 protocol, int ttl)
{
struct iphdr *niph, *oiph = ip_hdr(oldskb);
skb_reset_network_header(nskb);
niph = skb_put(nskb, sizeof(struct iphdr));
niph->version = 4;
niph->ihl = sizeof(struct iphdr) / 4;
niph->tos = 0;
niph->id = 0;
niph->frag_off = htons(IP_DF);
niph->protocol = protocol;
niph->check = 0;
niph->saddr = oiph->daddr;
niph->daddr = oiph->saddr;
niph->ttl = ttl;
nskb->protocol = htons(ETH_P_IP);
return niph;
}
EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth)
{
struct iphdr *niph = ip_hdr(nskb);
struct tcphdr *tcph;
skb_reset_transport_header(nskb);
tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
tcph->source = oth->dest;
tcph->dest = oth->source;
tcph->doff = sizeof(struct tcphdr) / 4;
if (oth->ack) {
tcph->seq = oth->ack_seq;
} else {
tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
oldskb->len - ip_hdrlen(oldskb) -
(oth->doff << 2));
tcph->ack = 1;
}
tcph->rst = 1;
tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
niph->daddr, 0);
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (unsigned char *)tcph - nskb->head;
nskb->csum_offset = offsetof(struct tcphdr, check);
}
EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
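/* Route back to the original sender so that replies generated in
 * PRE_ROUTING/INGRESS have a valid dst attached.
 */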
static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
{
struct dst_entry *dst = NULL;
struct flowi fl;
memset(&fl, 0, sizeof(struct flowi));
fl.u.ip4.daddr = ip_hdr(skb_in)->saddr;
nf_ip_route(dev_net(skb_in->dev), &dst, &fl, false);
if (!dst)
return -1;
skb_dst_set(skb_in, dst);
return 0;
}
/* Send RST reply */
void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook)
{
struct net_device *br_indev __maybe_unused;
struct sk_buff *nskb;
struct iphdr *niph;
const struct tcphdr *oth;
struct tcphdr _oth;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
if (!oth)
return;
if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
nf_reject_fill_skb_dst(oldskb) < 0)
return;
if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return;
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set_noref(nskb, skb_dst(oldskb));
nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
ip4_dst_hoplimit(skb_dst(nskb)));
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
goto free_nskb;
niph = ip_hdr(nskb);
/* "Never happens" */
if (nskb->len > dst_mtu(skb_dst(nskb)))
goto free_nskb;
nf_ct_attach(nskb, oldskb);
nf_ct_set_closing(skb_nfct(oldskb));
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* If we use ip_local_out for bridged traffic, the MAC source on
* the RST will be ours, instead of the destination's. This confuses
* some routers/firewalls, and they drop the packet. So we need to
* build the eth header using the original destination's MAC as the
* source, and send the RST packet directly.
*/
br_indev = nf_bridge_get_physindev(oldskb);
if (br_indev) {
struct ethhdr *oeth = eth_hdr(oldskb);
nskb->dev = br_indev;
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
oeth->h_source, oeth->h_dest, nskb->len) < 0)
goto free_nskb;
dev_queue_xmit(nskb);
} else
#endif
ip_local_out(net, nskb->sk, nskb);
return;
free_nskb:
kfree_skb(nskb);
}
EXPORT_SYMBOL_GPL(nf_send_reset);
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
{
struct iphdr *iph = ip_hdr(skb_in);
int dataoff = ip_hdrlen(skb_in);
u8 proto = iph->protocol;
if (iph->frag_off & htons(IP_OFFSET))
return;
if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
nf_reject_fill_skb_dst(skb_in) < 0)
return;
if (skb_csum_unnecessary(skb_in) ||
!nf_reject_verify_csum(skb_in, dataoff, proto)) {
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
return;
}
if (nf_ip_checksum(skb_in, hook, dataoff, proto) == 0)
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
EXPORT_SYMBOL_GPL(nf_send_unreach);
MODULE_LICENSE("GPL");
| linux-master | net/ipv4/netfilter/nf_reject_ipv4.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Packet matching code for ARP packets.
*
* Based heavily, if not almost entirely, upon ip_tables.c framework.
*
* Some ARP specific bits are:
*
* Copyright (C) 2002 David S. Miller ([email protected])
* Copyright (C) 2006-2009 Patrick McHardy <[email protected]>
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <net/compat.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <[email protected]>");
MODULE_DESCRIPTION("arptables core");
void *arpt_alloc_initial_table(const struct xt_table *info)
{
return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
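/* Masked comparison of a hardware address from the ARP payload against a
 * rule's address/mask pair; returns non-zero on mismatch.
 */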
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
const char *hdr_addr, int len)
{
int i, ret;
if (len > ARPT_DEV_ADDR_LEN_MAX)
len = ARPT_DEV_ADDR_LEN_MAX;
ret = 0;
for (i = 0; i < len; i++)
ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];
return ret != 0;
}
/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int).
 * Some arches don't care; unrolling the loop is a win on them.
 * For other arches, we only have a 16-bit alignment.
*/
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
unsigned long ret = 0;
const u16 *a = (const u16 *)_a;
const u16 *b = (const u16 *)_b;
const u16 *mask = (const u16 *)_mask;
int i;
for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
ret |= (a[i] ^ b[i]) & mask[i];
#endif
return ret;
}
/* Returns whether packet matches rule or not. */
static inline int arp_packet_match(const struct arphdr *arphdr,
struct net_device *dev,
const char *indev,
const char *outdev,
const struct arpt_arp *arpinfo)
{
const char *arpptr = (char *)(arphdr + 1);
const char *src_devaddr, *tgt_devaddr;
__be32 src_ipaddr, tgt_ipaddr;
long ret;
if (NF_INVF(arpinfo, ARPT_INV_ARPOP,
(arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop))
return 0;
if (NF_INVF(arpinfo, ARPT_INV_ARPHRD,
(arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd))
return 0;
if (NF_INVF(arpinfo, ARPT_INV_ARPPRO,
(arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro))
return 0;
if (NF_INVF(arpinfo, ARPT_INV_ARPHLN,
(arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln))
return 0;
src_devaddr = arpptr;
arpptr += dev->addr_len;
memcpy(&src_ipaddr, arpptr, sizeof(u32));
arpptr += sizeof(u32);
tgt_devaddr = arpptr;
arpptr += dev->addr_len;
memcpy(&tgt_ipaddr, arpptr, sizeof(u32));
if (NF_INVF(arpinfo, ARPT_INV_SRCDEVADDR,
arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr,
dev->addr_len)) ||
NF_INVF(arpinfo, ARPT_INV_TGTDEVADDR,
arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr,
dev->addr_len)))
return 0;
if (NF_INVF(arpinfo, ARPT_INV_SRCIP,
(src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr) ||
NF_INVF(arpinfo, ARPT_INV_TGTIP,
(tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr))
return 0;
/* Look for ifname matches. */
ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);
if (NF_INVF(arpinfo, ARPT_INV_VIA_IN, ret != 0))
return 0;
ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);
if (NF_INVF(arpinfo, ARPT_INV_VIA_OUT, ret != 0))
return 0;
return 1;
}
static inline int arp_checkentry(const struct arpt_arp *arp)
{
if (arp->flags & ~ARPT_F_MASK)
return 0;
if (arp->invflags & ~ARPT_INV_MASK)
return 0;
return 1;
}
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
net_err_ratelimited("arp_tables: error: '%s'\n",
(const char *)par->targinfo);
return NF_DROP;
}
static inline const struct xt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
return arpt_get_target((struct arpt_entry *)e);
}
static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
return (struct arpt_entry *)(base + offset);
}
static inline
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
return (void *)entry + entry->next_offset;
}
unsigned int arpt_do_table(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
const struct xt_table *table = priv;
unsigned int hook = state->hook;
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
unsigned int verdict = NF_DROP;
const struct arphdr *arp;
struct arpt_entry *e, **jumpstack;
const char *indev, *outdev;
const void *table_base;
unsigned int cpu, stackidx = 0;
const struct xt_table_info *private;
struct xt_action_param acpar;
unsigned int addend;
if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
return NF_DROP;
indev = state->in ? state->in->name : nulldevname;
outdev = state->out ? state->out->name : nulldevname;
local_bh_disable();
addend = xt_write_recseq_begin();
private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
/* No TEE support for arptables, so no need to switch to alternate
* stack. All targets that reenter must return absolute verdicts.
*/
e = get_entry(table_base, private->hook_entry[hook]);
acpar.state = state;
acpar.hotdrop = false;
arp = arp_hdr(skb);
do {
const struct xt_entry_target *t;
struct xt_counters *counter;
if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
e = arpt_next_entry(e);
continue;
}
counter = xt_get_this_cpu_counter(&e->counters);
ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);
t = arpt_get_target_c(e);
/* Standard target? */
if (!t->u.kernel.target->target) {
int v;
v = ((struct xt_standard_target *)t)->verdict;
if (v < 0) {
/* Pop from stack? */
if (v != XT_RETURN) {
verdict = (unsigned int)(-v) - 1;
break;
}
if (stackidx == 0) {
e = get_entry(table_base,
private->underflow[hook]);
} else {
e = jumpstack[--stackidx];
e = arpt_next_entry(e);
}
continue;
}
if (table_base + v
!= arpt_next_entry(e)) {
if (unlikely(stackidx >= private->stacksize)) {
verdict = NF_DROP;
break;
}
jumpstack[stackidx++] = e;
}
e = get_entry(table_base, v);
continue;
}
acpar.target = t->u.kernel.target;
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
if (verdict == XT_CONTINUE) {
/* Target might have changed stuff. */
arp = arp_hdr(skb);
e = arpt_next_entry(e);
} else {
/* Verdict */
break;
}
} while (!acpar.hotdrop);
xt_write_recseq_end(addend);
local_bh_enable();
if (acpar.hotdrop)
return NF_DROP;
else
return verdict;
}
/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_entry *e)
{
static const struct arpt_arp uncond;
return e->target_offset == sizeof(struct arpt_entry) &&
memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}
/* Figures out from what hook each rule can be called: returns 0 if
* there are loops. Puts hook bitmask in comefrom.
*/
static int mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0,
unsigned int *offsets)
{
unsigned int hook;
/* No recursion; use packet counter to save back ptrs (reset
* to 0 as we leave), and comefrom to save source hook bitmask.
*/
for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
unsigned int pos = newinfo->hook_entry[hook];
struct arpt_entry *e = entry0 + pos;
if (!(valid_hooks & (1 << hook)))
continue;
/* Set initial back pointer. */
e->counters.pcnt = pos;
for (;;) {
const struct xt_standard_target *t
= (void *)arpt_get_target_c(e);
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_ARP_NUMHOOKS))
return 0;
e->comefrom
|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
/* Unconditional return/END. */
if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < 0) || visited) {
unsigned int oldpos, size;
/* Return: backtrack through the last
* big jump.
*/
do {
e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
oldpos = pos;
pos = e->counters.pcnt;
e->counters.pcnt = 0;
/* We're at the start. */
if (pos == oldpos)
goto next;
e = entry0 + pos;
} while (oldpos == pos + e->next_offset);
/* Move along one */
size = e->next_offset;
e = entry0 + pos + size;
if (pos + size >= newinfo->size)
return 0;
e->counters.pcnt = pos;
pos += size;
} else {
int newpos = t->verdict;
if (strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0 &&
newpos >= 0) {
/* This a jump; chase it. */
if (!xt_find_jump_offset(offsets, newpos,
newinfo->number))
return 0;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
if (newpos >= newinfo->size)
return 0;
}
e = entry0 + newpos;
e->counters.pcnt = pos;
pos = newpos;
}
}
next: ;
}
return 1;
}
static int check_target(struct arpt_entry *e, struct net *net, const char *name)
{
struct xt_entry_target *t = arpt_get_target(e);
struct xt_tgchk_param par = {
.net = net,
.table = name,
.entryinfo = e,
.target = t->u.kernel.target,
.targinfo = t->data,
.hook_mask = e->comefrom,
.family = NFPROTO_ARP,
};
return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
}
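/* Allocate the per-cpu counter for one rule, resolve its target module and
 * run the target's checkentry hook.
 */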
static int
find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
unsigned int size,
struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
struct xt_target *target;
int ret;
if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
return -ENOMEM;
t = arpt_get_target(e);
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
ret = PTR_ERR(target);
goto out;
}
t->u.kernel.target = target;
ret = check_target(e, net, name);
if (ret)
goto err;
return 0;
err:
module_put(t->u.kernel.target->me);
out:
xt_percpu_counter_free(&e->counters);
return ret;
}
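/* Underflow entries must be unconditional standard-target rules whose
 * verdict is ACCEPT or DROP.
 */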
static bool check_underflow(const struct arpt_entry *e)
{
const struct xt_entry_target *t;
unsigned int verdict;
if (!unconditional(e))
return false;
t = arpt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
return false;
verdict = ((struct xt_standard_target *)t)->verdict;
verdict = -verdict - 1;
return verdict == NF_DROP || verdict == NF_ACCEPT;
}
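/* Validate the alignment, bounds and offsets of a single rule and record
 * any hook entry/underflow position it marks.
 */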
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit)
return -EINVAL;
if (e->next_offset
< sizeof(struct arpt_entry) + sizeof(struct xt_entry_target))
return -EINVAL;
if (!arp_checkentry(&e->arp))
return -EINVAL;
err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e->next_offset);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e))
return -EINVAL;
newinfo->underflow[h] = underflows[h];
}
}
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
return 0;
}
static void cleanup_entry(struct arpt_entry *e, struct net *net)
{
struct xt_tgdtor_param par;
struct xt_entry_target *t;
t = arpt_get_target(e);
par.net = net;
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_ARP;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
* newinfo).
*/
static int translate_table(struct net *net,
struct xt_table_info *newinfo,
void *entry0,
const struct arpt_replace *repl)
{
struct xt_percpu_counter_alloc_state alloc_state = { 0 };
struct arpt_entry *iter;
unsigned int *offsets;
unsigned int i;
int ret = 0;
newinfo->size = repl->size;
newinfo->number = repl->num_entries;
/* Init all hooks to impossible value. */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
newinfo->hook_entry[i] = 0xFFFFFFFF;
newinfo->underflow[i] = 0xFFFFFFFF;
}
offsets = xt_alloc_entry_offsets(newinfo->number);
if (!offsets)
return -ENOMEM;
i = 0;
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter, entry0, newinfo->size) {
ret = check_entry_size_and_hooks(iter, newinfo, entry0,
entry0 + repl->size,
repl->hook_entry,
repl->underflow,
repl->valid_hooks);
if (ret != 0)
goto out_free;
if (i < repl->num_entries)
offsets[i] = (void *)iter - entry0;
++i;
if (strcmp(arpt_get_target(iter)->u.user.name,
XT_ERROR_TARGET) == 0)
++newinfo->stacksize;
}
ret = -EINVAL;
if (i != repl->num_entries)
goto out_free;
ret = xt_check_table_hooks(newinfo, repl->valid_hooks);
if (ret)
goto out_free;
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
ret = -ELOOP;
goto out_free;
}
kvfree(offsets);
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
ret = find_check_entry(iter, net, repl->name, repl->size,
&alloc_state);
if (ret != 0)
break;
++i;
}
if (ret != 0) {
xt_entry_foreach(iter, entry0, newinfo->size) {
if (i-- == 0)
break;
cleanup_entry(iter, net);
}
return ret;
}
return ret;
out_free:
kvfree(offsets);
return ret;
}
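/* Snapshot the per-cpu counters of a live table; xt_recseq is used so each
 * 64-bit counter pair is read consistently against concurrent updates.
 */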
static void get_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
struct arpt_entry *iter;
unsigned int cpu;
unsigned int i;
for_each_possible_cpu(cpu) {
seqcount_t *s = &per_cpu(xt_recseq, cpu);
i = 0;
xt_entry_foreach(iter, t->entries, t->size) {
struct xt_counters *tmp;
u64 bcnt, pcnt;
unsigned int start;
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
do {
start = read_seqcount_begin(s);
bcnt = tmp->bcnt;
pcnt = tmp->pcnt;
} while (read_seqcount_retry(s, start));
ADD_COUNTER(counters[i], bcnt, pcnt);
++i;
cond_resched();
}
}
}
static void get_old_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
struct arpt_entry *iter;
unsigned int cpu, i;
for_each_possible_cpu(cpu) {
i = 0;
xt_entry_foreach(iter, t->entries, t->size) {
struct xt_counters *tmp;
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
++i;
}
cond_resched();
}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
* about).
*/
countersize = sizeof(struct xt_counters) * private->number;
counters = vzalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
get_counters(private, counters);
return counters;
}
static int copy_entries_to_user(unsigned int total_size,
const struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;
const struct arpt_entry *e;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
int ret = 0;
void *loc_cpu_entry;
counters = alloc_counters(table);
if (IS_ERR(counters))
return PTR_ERR(counters);
loc_cpu_entry = private->entries;
/* FIXME: use iterator macros --RR */
/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
const struct xt_entry_target *t;
e = loc_cpu_entry + off;
if (copy_to_user(userptr + off, e, sizeof(*e))) {
ret = -EFAULT;
goto free_counters;
}
if (copy_to_user(userptr + off
+ offsetof(struct arpt_entry, counters),
&counters[num],
sizeof(counters[num])) != 0) {
ret = -EFAULT;
goto free_counters;
}
t = arpt_get_target_c(e);
if (xt_target_to_user(t, userptr + off + e->target_offset)) {
ret = -EFAULT;
goto free_counters;
}
}
free_counters:
vfree(counters);
return ret;
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;
if (v > 0)
v += xt_compat_calc_jump(NFPROTO_ARP, v);
memcpy(dst, &v, sizeof(v));
}
static int compat_standard_to_user(void __user *dst, const void *src)
{
compat_int_t cv = *(int *)src;
if (cv > 0)
cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
static int compat_calc_entry(const struct arpt_entry *e,
const struct xt_table_info *info,
const void *base, struct xt_table_info *newinfo)
{
const struct xt_entry_target *t;
unsigned int entry_offset;
int off, i, ret;
off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
entry_offset = (void *)e - base;
t = arpt_get_target_c(e);
off += xt_compat_target_offset(t->u.kernel.target);
newinfo->size -= off;
ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
if (ret)
return ret;
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
if (info->hook_entry[i] &&
(e < (struct arpt_entry *)(base + info->hook_entry[i])))
newinfo->hook_entry[i] -= off;
if (info->underflow[i] &&
(e < (struct arpt_entry *)(base + info->underflow[i])))
newinfo->underflow[i] -= off;
}
return 0;
}
static int compat_table_info(const struct xt_table_info *info,
struct xt_table_info *newinfo)
{
struct arpt_entry *iter;
const void *loc_cpu_entry;
int ret;
if (!newinfo || !info)
return -EINVAL;
	/* we don't care about newinfo->entries */
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries;
ret = xt_compat_init_offsets(NFPROTO_ARP, info->number);
if (ret)
return ret;
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
return ret;
}
return 0;
}
#endif
static int get_info(struct net *net, void __user *user, const int *len)
{
char name[XT_TABLE_MAXNAMELEN];
struct xt_table *t;
int ret;
if (*len != sizeof(struct arpt_getinfo))
return -EINVAL;
if (copy_from_user(name, user, sizeof(name)) != 0)
return -EFAULT;
name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
xt_compat_lock(NFPROTO_ARP);
#endif
t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
if (!IS_ERR(t)) {
struct arpt_getinfo info;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct xt_table_info tmp;
if (in_compat_syscall()) {
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(NFPROTO_ARP);
private = &tmp;
}
#endif
memset(&info, 0, sizeof(info));
info.valid_hooks = t->valid_hooks;
memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
memcpy(info.underflow, private->underflow,
sizeof(info.underflow));
info.num_entries = private->number;
info.size = private->size;
strcpy(info.name, name);
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
else
ret = 0;
xt_table_unlock(t);
module_put(t->me);
} else
ret = PTR_ERR(t);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
xt_compat_unlock(NFPROTO_ARP);
#endif
return ret;
}
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
const int *len)
{
int ret;
struct arpt_get_entries get;
struct xt_table *t;
if (*len < sizeof(get))
return -EINVAL;
if (copy_from_user(&get, uptr, sizeof(get)) != 0)
return -EFAULT;
if (*len != sizeof(struct arpt_get_entries) + get.size)
return -EINVAL;
get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
else
ret = -EAGAIN;
module_put(t->me);
xt_table_unlock(t);
} else
ret = PTR_ERR(t);
return ret;
}
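/* Swap in the new ruleset, copy the old counters to userspace and release
 * the entries of the table being replaced.
 */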
static int __do_replace(struct net *net, const char *name,
unsigned int valid_hooks,
struct xt_table_info *newinfo,
unsigned int num_counters,
void __user *counters_ptr)
{
int ret;
struct xt_table *t;
struct xt_table_info *oldinfo;
struct xt_counters *counters;
void *loc_cpu_old_entry;
struct arpt_entry *iter;
ret = 0;
counters = xt_counters_alloc(num_counters);
if (!counters) {
ret = -ENOMEM;
goto out;
}
t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
if (IS_ERR(t)) {
ret = PTR_ERR(t);
goto free_newinfo_counters_untrans;
}
/* You lied! */
if (valid_hooks != t->valid_hooks) {
ret = -EINVAL;
goto put_module;
}
oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
if (!oldinfo)
goto put_module;
/* Update module usage count based on number of rules */
if ((oldinfo->number > oldinfo->initial_entries) ||
(newinfo->number <= oldinfo->initial_entries))
module_put(t->me);
if ((oldinfo->number > oldinfo->initial_entries) &&
(newinfo->number <= oldinfo->initial_entries))
module_put(t->me);
xt_table_unlock(t);
get_old_counters(oldinfo, counters);
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries;
xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
cleanup_entry(iter, net);
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
sizeof(struct xt_counters) * num_counters) != 0) {
/* Silent error, can't fail, new table is already in place */
net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
}
vfree(counters);
return ret;
put_module:
module_put(t->me);
xt_table_unlock(t);
free_newinfo_counters_untrans:
vfree(counters);
out:
return ret;
}
static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
int ret;
struct arpt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
struct arpt_entry *iter;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
loc_cpu_entry = newinfo->entries;
if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
tmp.size) != 0) {
ret = -EFAULT;
goto free_newinfo;
}
ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, tmp.counters);
if (ret)
goto free_newinfo_untrans;
return 0;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
{
unsigned int i;
struct xt_counters_info tmp;
struct xt_counters *paddc;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
struct arpt_entry *iter;
unsigned int addend;
paddc = xt_copy_counters(arg, len, &tmp);
if (IS_ERR(paddc))
return PTR_ERR(paddc);
t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
if (IS_ERR(t)) {
ret = PTR_ERR(t);
goto free;
}
local_bh_disable();
private = t->private;
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
i = 0;
addend = xt_write_recseq_begin();
xt_entry_foreach(iter, private->entries, private->size) {
struct xt_counters *tmp;
tmp = xt_get_this_cpu_counter(&iter->counters);
ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
++i;
}
xt_write_recseq_end(addend);
unlock_up_free:
local_bh_enable();
xt_table_unlock(t);
module_put(t->me);
free:
vfree(paddc);
return ret;
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct compat_arpt_replace {
char name[XT_TABLE_MAXNAMELEN];
u32 valid_hooks;
u32 num_entries;
u32 size;
u32 hook_entry[NF_ARP_NUMHOOKS];
u32 underflow[NF_ARP_NUMHOOKS];
u32 num_counters;
compat_uptr_t counters;
struct compat_arpt_entry entries[];
};
static inline void compat_release_entry(struct compat_arpt_entry *e)
{
struct xt_entry_target *t;
t = compat_arpt_get_target(e);
module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit)
{
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
int ret, off;
if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit)
return -EINVAL;
if (e->next_offset < sizeof(struct compat_arpt_entry) +
sizeof(struct compat_xt_entry_target))
return -EINVAL;
if (!arp_checkentry(&e->arp))
return -EINVAL;
ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
e->next_offset);
if (ret)
return ret;
off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
entry_offset = (void *)e - (void *)base;
t = compat_arpt_get_target(e);
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
ret = PTR_ERR(target);
goto out;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
if (ret)
goto release_target;
return 0;
release_target:
module_put(t->u.kernel.target->me);
out:
return ret;
}
static void
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct arpt_entry *de;
unsigned int origsize;
int h;
origsize = *size;
de = *dstptr;
memcpy(de, e, sizeof(struct arpt_entry));
memcpy(&de->counters, &e->counters, sizeof(e->counters));
*dstptr += sizeof(struct arpt_entry);
*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
de->target_offset = e->target_offset - (origsize - *size);
t = compat_arpt_get_target(e);
xt_compat_target_from_user(t, dstptr, size);
de->next_offset = e->next_offset - (origsize - *size);
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if ((unsigned char *)de - base < newinfo->hook_entry[h])
newinfo->hook_entry[h] -= origsize - *size;
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
}
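/* Convert a 32-bit compat ruleset into native layout, then validate it with
 * the regular translate_table() path.
 */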
static int translate_compat_table(struct net *net,
struct xt_table_info **pinfo,
void **pentry0,
const struct compat_arpt_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_arpt_entry *iter0;
struct arpt_replace repl;
unsigned int size;
int ret;
info = *pinfo;
entry0 = *pentry0;
size = compatr->size;
info->number = compatr->num_entries;
j = 0;
xt_compat_lock(NFPROTO_ARP);
ret = xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
if (ret)
goto out_unlock;
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}
ret = -EINVAL;
if (j != compatr->num_entries)
goto out_unlock;
ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;
memset(newinfo->entries, 0, size);
newinfo->number = compatr->num_entries;
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
newinfo->hook_entry[i] = compatr->hook_entry[i];
newinfo->underflow[i] = compatr->underflow[i];
}
entry1 = newinfo->entries;
pos = entry1;
size = compatr->size;
xt_entry_foreach(iter0, entry0, compatr->size)
compat_copy_entry_from_user(iter0, &pos, &size,
newinfo, entry1);
/* all module references in entry0 are now gone */
xt_compat_flush_offsets(NFPROTO_ARP);
xt_compat_unlock(NFPROTO_ARP);
memcpy(&repl, compatr, sizeof(*compatr));
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
repl.hook_entry[i] = newinfo->hook_entry[i];
repl.underflow[i] = newinfo->underflow[i];
}
repl.num_counters = 0;
repl.counters = NULL;
repl.size = newinfo->size;
ret = translate_table(net, newinfo, entry1, &repl);
if (ret)
goto free_newinfo;
*pinfo = newinfo;
*pentry0 = entry1;
xt_free_table_info(info);
return 0;
free_newinfo:
xt_free_table_info(newinfo);
return ret;
out_unlock:
xt_compat_flush_offsets(NFPROTO_ARP);
xt_compat_unlock(NFPROTO_ARP);
xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
}
static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
int ret;
struct compat_arpt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
struct arpt_entry *iter;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
loc_cpu_entry = newinfo->entries;
if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
tmp.size) != 0) {
ret = -EFAULT;
goto free_newinfo;
}
ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, compat_ptr(tmp.counters));
if (ret)
goto free_newinfo_untrans;
return 0;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
compat_uint_t *size,
struct xt_counters *counters,
unsigned int i)
{
struct xt_entry_target *t;
struct compat_arpt_entry __user *ce;
u_int16_t target_offset, next_offset;
compat_uint_t origsize;
int ret;
origsize = *size;
ce = *dstptr;
if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
copy_to_user(&ce->counters, &counters[i],
sizeof(counters[i])) != 0)
return -EFAULT;
*dstptr += sizeof(struct compat_arpt_entry);
*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
target_offset = e->target_offset - (origsize - *size);
t = arpt_get_target(e);
ret = xt_compat_target_to_user(t, dstptr, size);
if (ret)
return ret;
next_offset = e->next_offset - (origsize - *size);
if (put_user(target_offset, &ce->target_offset) != 0 ||
put_user(next_offset, &ce->next_offset) != 0)
return -EFAULT;
return 0;
}
static int compat_copy_entries_to_user(unsigned int total_size,
struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
unsigned int i = 0;
struct arpt_entry *iter;
counters = alloc_counters(table);
if (IS_ERR(counters))
return PTR_ERR(counters);
pos = userptr;
size = total_size;
xt_entry_foreach(iter, private->entries, total_size) {
ret = compat_copy_entry_to_user(iter, &pos,
&size, counters, i++);
if (ret != 0)
break;
}
vfree(counters);
return ret;
}
struct compat_arpt_get_entries {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t size;
struct compat_arpt_entry entrytable[];
};
static int compat_get_entries(struct net *net,
struct compat_arpt_get_entries __user *uptr,
int *len)
{
int ret;
struct compat_arpt_get_entries get;
struct xt_table *t;
if (*len < sizeof(get))
return -EINVAL;
if (copy_from_user(&get, uptr, sizeof(get)) != 0)
return -EFAULT;
if (*len != sizeof(struct compat_arpt_get_entries) + get.size)
return -EINVAL;
get.name[sizeof(get.name) - 1] = '\0';
xt_compat_lock(NFPROTO_ARP);
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
struct xt_table_info info;
ret = compat_table_info(private, &info);
if (!ret && get.size == info.size) {
ret = compat_copy_entries_to_user(private->size,
t, uptr->entrytable);
} else if (!ret)
ret = -EAGAIN;
xt_compat_flush_offsets(NFPROTO_ARP);
module_put(t->me);
xt_table_unlock(t);
} else
ret = PTR_ERR(t);
xt_compat_unlock(NFPROTO_ARP);
return ret;
}
#endif
static int do_arpt_set_ctl(struct sock *sk, int cmd, sockptr_t arg,
unsigned int len)
{
int ret;
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case ARPT_SO_SET_REPLACE:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
ret = compat_do_replace(sock_net(sk), arg, len);
else
#endif
ret = do_replace(sock_net(sk), arg, len);
break;
case ARPT_SO_SET_ADD_COUNTERS:
ret = do_add_counters(sock_net(sk), arg, len);
break;
default:
ret = -EINVAL;
}
return ret;
}
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
int ret;
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case ARPT_SO_GET_INFO:
ret = get_info(sock_net(sk), user, len);
break;
case ARPT_SO_GET_ENTRIES:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
ret = compat_get_entries(sock_net(sk), user, len);
else
#endif
ret = get_entries(sock_net(sk), user, len);
break;
case ARPT_SO_GET_REVISION_TARGET: {
struct xt_get_revision rev;
if (*len != sizeof(rev)) {
ret = -EINVAL;
break;
}
if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
ret = -EFAULT;
break;
}
rev.name[sizeof(rev.name)-1] = 0;
try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
rev.revision, 1, &ret),
"arpt_%s", rev.name);
break;
}
default:
ret = -EINVAL;
}
return ret;
}
static void __arpt_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
struct module *table_owner = table->me;
struct arpt_entry *iter;
private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries;
xt_entry_foreach(iter, loc_cpu_entry, private->size)
cleanup_entry(iter, net);
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
}
int arpt_register_table(struct net *net,
const struct xt_table *table,
const struct arpt_replace *repl,
const struct nf_hook_ops *template_ops)
{
struct nf_hook_ops *ops;
unsigned int num_ops;
int ret, i;
struct xt_table_info *newinfo;
struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table *new_table;
newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
return -ENOMEM;
loc_cpu_entry = newinfo->entries;
memcpy(loc_cpu_entry, repl->entries, repl->size);
ret = translate_table(net, newinfo, loc_cpu_entry, repl);
if (ret != 0) {
xt_free_table_info(newinfo);
return ret;
}
new_table = xt_register_table(net, table, &bootstrap, newinfo);
if (IS_ERR(new_table)) {
struct arpt_entry *iter;
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
xt_free_table_info(newinfo);
return PTR_ERR(new_table);
}
num_ops = hweight32(table->valid_hooks);
if (num_ops == 0) {
ret = -EINVAL;
goto out_free;
}
ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
}
for (i = 0; i < num_ops; i++)
ops[i].priv = new_table;
new_table->ops = ops;
ret = nf_register_net_hooks(net, ops, num_ops);
if (ret != 0)
goto out_free;
return ret;
out_free:
__arpt_unregister_table(net, new_table);
return ret;
}
void arpt_unregister_table_pre_exit(struct net *net, const char *name)
{
struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);
if (table)
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
}
EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
void arpt_unregister_table(struct net *net, const char *name)
{
struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);
if (table)
__arpt_unregister_table(net, table);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target arpt_builtin_tg[] __read_mostly = {
{
.name = XT_STANDARD_TARGET,
.targetsize = sizeof(int),
.family = NFPROTO_ARP,
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
.compatsize = sizeof(compat_int_t),
.compat_from_user = compat_standard_from_user,
.compat_to_user = compat_standard_to_user,
#endif
},
{
.name = XT_ERROR_TARGET,
.target = arpt_error,
.targetsize = XT_FUNCTION_MAXNAMELEN,
.family = NFPROTO_ARP,
},
};
static struct nf_sockopt_ops arpt_sockopts = {
.pf = PF_INET,
.set_optmin = ARPT_BASE_CTL,
.set_optmax = ARPT_SO_SET_MAX+1,
.set = do_arpt_set_ctl,
.get_optmin = ARPT_BASE_CTL,
.get_optmax = ARPT_SO_GET_MAX+1,
.get = do_arpt_get_ctl,
.owner = THIS_MODULE,
};
static int __net_init arp_tables_net_init(struct net *net)
{
return xt_proto_init(net, NFPROTO_ARP);
}
static void __net_exit arp_tables_net_exit(struct net *net)
{
xt_proto_fini(net, NFPROTO_ARP);
}
static struct pernet_operations arp_tables_net_ops = {
.init = arp_tables_net_init,
.exit = arp_tables_net_exit,
};
static int __init arp_tables_init(void)
{
int ret;
ret = register_pernet_subsys(&arp_tables_net_ops);
if (ret < 0)
goto err1;
/* No one else will be downing sem now, so we won't sleep */
ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
if (ret < 0)
goto err2;
/* Register setsockopt */
ret = nf_register_sockopt(&arpt_sockopts);
if (ret < 0)
goto err4;
return 0;
err4:
xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
err2:
unregister_pernet_subsys(&arp_tables_net_ops);
err1:
return ret;
}
static void __exit arp_tables_fini(void)
{
nf_unregister_sockopt(&arpt_sockopts);
xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
unregister_pernet_subsys(&arp_tables_net_ops);
}
EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);
module_init(arp_tables_init);
module_exit(arp_tables_fini);
| linux-master | net/ipv4/netfilter/arp_tables.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* H.323 extension for NAT alteration.
*
* Copyright (c) 2006 Jing Min Zhao <[email protected]>
* Copyright (c) 2006-2012 Patrick McHardy <[email protected]>
*
* Based on the 'brute force' H.323 NAT module by
* Jozsef Kadlecsik <[email protected]>
*/
#include <linux/module.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <linux/netfilter/nf_conntrack_h323.h>
/****************************************************************************/
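/* Rewrite an IP address/port pair embedded in the H.323 payload via the
 * TCP/UDP mangling helpers and refresh the caller's data pointer, since
 * mangling may have resized or relinearized the skb data.
 */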
static int set_addr(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
unsigned int addroff, __be32 ip, __be16 port)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
struct {
__be32 ip;
__be16 port;
} __attribute__ ((__packed__)) buf;
const struct tcphdr *th;
struct tcphdr _tcph;
buf.ip = ip;
buf.port = port;
addroff += dataoff;
if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
protoff, addroff, sizeof(buf),
(char *) &buf, sizeof(buf))) {
net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n");
return -1;
}
/* Relocate data pointer */
th = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(_tcph), &_tcph);
if (th == NULL)
return -1;
*data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff;
} else {
if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
protoff, addroff, sizeof(buf),
(char *) &buf, sizeof(buf))) {
net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n");
return -1;
}
/* nf_nat_mangle_udp_packet uses skb_ensure_writable() to copy
* or pull everything in a linear buffer, so we can safely
* use the skb pointers now */
*data = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr);
}
return 0;
}
/****************************************************************************/
static int set_h225_addr(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
TransportAddress *taddr,
union nf_inet_addr *addr, __be16 port)
{
return set_addr(skb, protoff, data, dataoff, taddr->ipAddress.ip,
addr->ip, port);
}
/****************************************************************************/
static int set_h245_addr(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr,
union nf_inet_addr *addr, __be16 port)
{
return set_addr(skb, protoff, data, dataoff,
taddr->unicastAddress.iPAddress.network,
addr->ip, port);
}
/****************************************************************************/
static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
TransportAddress *taddr, int count)
{
const struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int i;
__be16 port;
union nf_inet_addr addr;
for (i = 0; i < count; i++) {
if (get_h225_addr(ct, *data, &taddr[i], &addr, &port)) {
if (addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
port == info->sig_port[dir]) {
/* GW->GK */
/* Fix for Gnomemeeting */
if (i > 0 &&
get_h225_addr(ct, *data, &taddr[0],
&addr, &port) &&
(ntohl(addr.ip) & 0xff000000) == 0x7f000000)
i = 0;
pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n",
&addr.ip, port,
&ct->tuplehash[!dir].tuple.dst.u3.ip,
info->sig_port[!dir]);
return set_h225_addr(skb, protoff, data, 0,
&taddr[i],
&ct->tuplehash[!dir].
tuple.dst.u3,
info->sig_port[!dir]);
} else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
port == info->sig_port[dir]) {
/* GK->GW */
pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n",
&addr.ip, port,
&ct->tuplehash[!dir].tuple.src.u3.ip,
info->sig_port[!dir]);
return set_h225_addr(skb, protoff, data, 0,
&taddr[i],
&ct->tuplehash[!dir].
tuple.src.u3,
info->sig_port[!dir]);
}
}
}
return 0;
}
/****************************************************************************/
static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
TransportAddress *taddr, int count)
{
int dir = CTINFO2DIR(ctinfo);
int i;
__be16 port;
union nf_inet_addr addr;
for (i = 0; i < count; i++) {
if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
port == ct->tuplehash[dir].tuple.src.u.udp.port) {
pr_debug("nf_nat_ras: set rasAddress %pI4:%hu->%pI4:%hu\n",
&addr.ip, ntohs(port),
&ct->tuplehash[!dir].tuple.dst.u3.ip,
ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port));
return set_h225_addr(skb, protoff, data, 0, &taddr[i],
&ct->tuplehash[!dir].tuple.dst.u3,
ct->tuplehash[!dir].tuple.
dst.u.udp.port);
}
}
return 0;
}
/****************************************************************************/
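/* Reserve a consecutive RTP/RTCP port pair on the NAT side, set up the two
 * expectations and rewrite the H.245 transport address accordingly.
 */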
static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
H245_TransportAddress *taddr,
__be16 port, __be16 rtp_port,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int i;
u_int16_t nated_port;
/* Set expectations for NAT */
rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
rtp_exp->expectfn = nf_nat_follow_master;
rtp_exp->dir = !dir;
rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
rtcp_exp->expectfn = nf_nat_follow_master;
rtcp_exp->dir = !dir;
/* Lookup existing expects */
for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) {
if (info->rtp_port[i][dir] == rtp_port) {
/* Expected */
/* Use allocated ports first. This will refresh
* the expects */
rtp_exp->tuple.dst.u.udp.port = info->rtp_port[i][dir];
rtcp_exp->tuple.dst.u.udp.port =
htons(ntohs(info->rtp_port[i][dir]) + 1);
break;
} else if (info->rtp_port[i][dir] == 0) {
/* Not expected */
break;
}
}
/* Run out of expectations */
if (i >= H323_RTP_CHANNEL_MAX) {
net_notice_ratelimited("nf_nat_h323: out of expectations\n");
return 0;
}
/* Try to get a pair of ports. */
for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port);
nated_port != 0; nated_port += 2) {
int ret;
rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
ret = nf_ct_expect_related(rtp_exp, 0);
if (ret == 0) {
rtcp_exp->tuple.dst.u.udp.port =
htons(nated_port + 1);
ret = nf_ct_expect_related(rtcp_exp, 0);
if (ret == 0)
break;
else if (ret == -EBUSY) {
nf_ct_unexpect_related(rtp_exp);
continue;
} else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp);
nated_port = 0;
break;
}
} else if (ret != -EBUSY) {
nated_port = 0;
break;
}
}
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_h323: out of RTP ports\n");
return 0;
}
/* Modify signal */
if (set_h245_addr(skb, protoff, data, dataoff, taddr,
&ct->tuplehash[!dir].tuple.dst.u3,
htons((port & htons(1)) ? nated_port + 1 :
nated_port))) {
nf_ct_unexpect_related(rtp_exp);
nf_ct_unexpect_related(rtcp_exp);
return -1;
}
/* Save ports */
info->rtp_port[i][dir] = rtp_port;
info->rtp_port[i][!dir] = htons(nated_port);
/* Success */
pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n",
&rtp_exp->tuple.src.u3.ip,
ntohs(rtp_exp->tuple.src.u.udp.port),
&rtp_exp->tuple.dst.u3.ip,
ntohs(rtp_exp->tuple.dst.u.udp.port));
pr_debug("nf_nat_h323: expect RTCP %pI4:%hu->%pI4:%hu\n",
&rtcp_exp->tuple.src.u3.ip,
ntohs(rtcp_exp->tuple.src.u.udp.port),
&rtcp_exp->tuple.dst.u3.ip,
ntohs(rtcp_exp->tuple.dst.u.udp.port));
return 0;
}
/****************************************************************************/
static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
H245_TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp)
{
int dir = CTINFO2DIR(ctinfo);
u_int16_t nated_port = ntohs(port);
/* Set expectations for NAT */
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->expectfn = nf_nat_follow_master;
exp->dir = !dir;
nated_port = nf_nat_exp_find_port(exp, nated_port);
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_h323: out of TCP ports\n");
return 0;
}
/* Modify signal */
if (set_h245_addr(skb, protoff, data, dataoff, taddr,
&ct->tuplehash[!dir].tuple.dst.u3,
htons(nated_port)) < 0) {
nf_ct_unexpect_related(exp);
return -1;
}
pr_debug("nf_nat_h323: expect T.120 %pI4:%hu->%pI4:%hu\n",
&exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
&exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.tcp.port));
return 0;
}
/****************************************************************************/
static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
u_int16_t nated_port = ntohs(port);
/* Set expectations for NAT */
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->expectfn = nf_nat_follow_master;
exp->dir = !dir;
/* Check existing expects */
if (info->sig_port[dir] == port)
nated_port = ntohs(info->sig_port[!dir]);
nated_port = nf_nat_exp_find_port(exp, nated_port);
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
return 0;
}
/* Modify signal */
if (set_h225_addr(skb, protoff, data, dataoff, taddr,
&ct->tuplehash[!dir].tuple.dst.u3,
htons(nated_port))) {
nf_ct_unexpect_related(exp);
return -1;
}
/* Save ports */
info->sig_port[dir] = port;
info->sig_port[!dir] = htons(nated_port);
pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n",
&exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
&exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.tcp.port));
return 0;
}
/****************************************************************************
* This conntrack expect function replaces nf_conntrack_q931_expect()
* which was set by nf_conntrack_h323.c.
****************************************************************************/
static void ip_nat_q931_expect(struct nf_conn *new,
struct nf_conntrack_expect *this)
{
struct nf_nat_range2 range;
if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */
nf_nat_follow_master(new, this);
return;
}
/* This must be a fresh one. */
BUG_ON(new->status & IPS_NAT_DONE_MASK);
/* Change src to where master sends to */
range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_addr = range.max_addr =
new->tuplehash[!this->dir].tuple.src.u3;
nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
/* For DST manip, map port here to where it's expected. */
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min_proto = range.max_proto = this->saved_proto;
range.min_addr = range.max_addr =
new->master->tuplehash[!this->dir].tuple.src.u3;
nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
}
/****************************************************************************/
static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
TransportAddress *taddr, int idx,
__be16 port, struct nf_conntrack_expect *exp)
{
struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
u_int16_t nated_port = ntohs(port);
union nf_inet_addr addr;
/* Set expectations for NAT */
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->expectfn = ip_nat_q931_expect;
exp->dir = !dir;
/* Check existing expects */
if (info->sig_port[dir] == port)
nated_port = ntohs(info->sig_port[!dir]);
nated_port = nf_nat_exp_find_port(exp, nated_port);
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_ras: out of TCP ports\n");
return 0;
}
/* Modify signal */
if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
&ct->tuplehash[!dir].tuple.dst.u3,
htons(nated_port))) {
nf_ct_unexpect_related(exp);
return -1;
}
/* Save ports */
info->sig_port[dir] = port;
info->sig_port[!dir] = htons(nated_port);
/* Fix for Gnomemeeting */
if (idx > 0 &&
get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
(ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
if (set_h225_addr(skb, protoff, data, 0, &taddr[0],
&ct->tuplehash[!dir].tuple.dst.u3,
info->sig_port[!dir])) {
nf_ct_unexpect_related(exp);
return -1;
}
}
/* Success */
pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n",
&exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
&exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.tcp.port));
return 0;
}
/****************************************************************************/
static void ip_nat_callforwarding_expect(struct nf_conn *new,
struct nf_conntrack_expect *this)
{
struct nf_nat_range2 range;
/* This must be a fresh one. */
BUG_ON(new->status & IPS_NAT_DONE_MASK);
/* Change src to where master sends to */
range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_addr = range.max_addr =
new->tuplehash[!this->dir].tuple.src.u3;
nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
/* For DST manip, map port here to where it's expected. */
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min_proto = range.max_proto = this->saved_proto;
range.min_addr = range.max_addr = this->saved_addr;
nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
}
/****************************************************************************/
static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned char **data, int dataoff,
TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp)
{
int dir = CTINFO2DIR(ctinfo);
u_int16_t nated_port;
/* Set expectations for NAT */
exp->saved_addr = exp->tuple.dst.u3;
exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->expectfn = ip_nat_callforwarding_expect;
exp->dir = !dir;
nated_port = nf_nat_exp_find_port(exp, ntohs(port));
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
return 0;
}
/* Modify signal */
if (set_h225_addr(skb, protoff, data, dataoff, taddr,
&ct->tuplehash[!dir].tuple.dst.u3,
htons(nated_port))) {
nf_ct_unexpect_related(exp);
return -1;
}
/* Success */
pr_debug("nf_nat_q931: expect Call Forwarding %pI4:%hu->%pI4:%hu\n",
&exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
&exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.tcp.port));
return 0;
}
static struct nf_ct_helper_expectfn q931_nat = {
.name = "Q.931",
.expectfn = ip_nat_q931_expect,
};
static struct nf_ct_helper_expectfn callforwarding_nat = {
.name = "callforwarding",
.expectfn = ip_nat_callforwarding_expect,
};
static const struct nfct_h323_nat_hooks nathooks = {
.set_h245_addr = set_h245_addr,
.set_h225_addr = set_h225_addr,
.set_sig_addr = set_sig_addr,
.set_ras_addr = set_ras_addr,
.nat_rtp_rtcp = nat_rtp_rtcp,
.nat_t120 = nat_t120,
.nat_h245 = nat_h245,
.nat_callforwarding = nat_callforwarding,
.nat_q931 = nat_q931,
};
/****************************************************************************/
static int __init nf_nat_h323_init(void)
{
RCU_INIT_POINTER(nfct_h323_nat_hook, &nathooks);
nf_ct_helper_expectfn_register(&q931_nat);
nf_ct_helper_expectfn_register(&callforwarding_nat);
return 0;
}
/****************************************************************************/
static void __exit nf_nat_h323_fini(void)
{
RCU_INIT_POINTER(nfct_h323_nat_hook, NULL);
nf_ct_helper_expectfn_unregister(&q931_nat);
nf_ct_helper_expectfn_unregister(&callforwarding_nat);
synchronize_rcu();
}
/****************************************************************************/
module_init(nf_nat_h323_init);
module_exit(nf_nat_h323_fini);
MODULE_AUTHOR("Jing Min Zhao <[email protected]>");
MODULE_DESCRIPTION("H.323 NAT helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_NAT_HELPER("h323");
| linux-master | net/ipv4/netfilter/nf_nat_h323.c |
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <[email protected]>
*/
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/netns/generic.h>
#include <net/route.h>
#include <net/ip.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/netfilter/nf_conntrack_zones.h>
static DEFINE_MUTEX(defrag4_mutex);
static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
u_int32_t user)
{
int err;
local_bh_disable();
err = ip_defrag(net, skb, user);
local_bh_enable();
if (!err)
skb->ignore_df = 1;
return err;
}
static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (skb_nfct(skb)) {
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
}
#endif
if (nf_bridge_in_prerouting(skb))
return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
if (hooknum == NF_INET_PRE_ROUTING)
return IP_DEFRAG_CONNTRACK_IN + zone_id;
else
return IP_DEFRAG_CONNTRACK_OUT + zone_id;
}
static unsigned int ipv4_conntrack_defrag(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct sock *sk = skb->sk;
if (sk && sk_fullsock(sk) && (sk->sk_family == PF_INET) &&
inet_test_bit(NODEFRAG, sk))
return NF_ACCEPT;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#if !IS_ENABLED(CONFIG_NF_NAT)
/* Previously seen (loopback)? Ignore. Do this before
fragment check. */
if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
return NF_ACCEPT;
#endif
if (skb->_nfct == IP_CT_UNTRACKED)
return NF_ACCEPT;
#endif
/* Gather fragments. */
if (ip_is_fragment(ip_hdr(skb))) {
enum ip_defrag_users user =
nf_ct_defrag_user(state->hook, skb);
if (nf_ct_ipv4_gather_frags(state->net, skb, user))
return NF_STOLEN;
}
return NF_ACCEPT;
}
static const struct nf_hook_ops ipv4_defrag_ops[] = {
{
.hook = ipv4_conntrack_defrag,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_DEFRAG,
},
{
.hook = ipv4_conntrack_defrag,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK_DEFRAG,
},
};
static void __net_exit defrag4_net_exit(struct net *net)
{
if (net->nf.defrag_ipv4_users) {
nf_unregister_net_hooks(net, ipv4_defrag_ops,
ARRAY_SIZE(ipv4_defrag_ops));
net->nf.defrag_ipv4_users = 0;
}
}
static const struct nf_defrag_hook defrag_hook = {
.owner = THIS_MODULE,
.enable = nf_defrag_ipv4_enable,
.disable = nf_defrag_ipv4_disable,
};
static struct pernet_operations defrag4_net_ops = {
.exit = defrag4_net_exit,
};
static int __init nf_defrag_init(void)
{
int err;
err = register_pernet_subsys(&defrag4_net_ops);
if (err)
return err;
rcu_assign_pointer(nf_defrag_v4_hook, &defrag_hook);
return err;
}
static void __exit nf_defrag_fini(void)
{
rcu_assign_pointer(nf_defrag_v4_hook, NULL);
unregister_pernet_subsys(&defrag4_net_ops);
}
int nf_defrag_ipv4_enable(struct net *net)
{
int err = 0;
mutex_lock(&defrag4_mutex);
if (net->nf.defrag_ipv4_users == UINT_MAX) {
err = -EOVERFLOW;
goto out_unlock;
}
if (net->nf.defrag_ipv4_users) {
net->nf.defrag_ipv4_users++;
goto out_unlock;
}
err = nf_register_net_hooks(net, ipv4_defrag_ops,
ARRAY_SIZE(ipv4_defrag_ops));
if (err == 0)
net->nf.defrag_ipv4_users = 1;
out_unlock:
mutex_unlock(&defrag4_mutex);
return err;
}
EXPORT_SYMBOL_GPL(nf_defrag_ipv4_enable);
void nf_defrag_ipv4_disable(struct net *net)
{
mutex_lock(&defrag4_mutex);
if (net->nf.defrag_ipv4_users) {
net->nf.defrag_ipv4_users--;
if (net->nf.defrag_ipv4_users == 0)
nf_unregister_net_hooks(net, ipv4_defrag_ops,
ARRAY_SIZE(ipv4_defrag_ops));
}
mutex_unlock(&defrag4_mutex);
}
EXPORT_SYMBOL_GPL(nf_defrag_ipv4_disable);
module_init(nf_defrag_init);
module_exit(nf_defrag_fini);
MODULE_LICENSE("GPL");
| linux-master | net/ipv4/netfilter/nf_defrag_ipv4.c |
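/*
 * Illustrative sketch, not part of the kernel sources above: how a
 * hypothetical consumer module might take and drop the per-netns reference
 * on the IPv4 defrag hooks via the nf_defrag_ipv4_enable()/_disable()
 * helpers exported by nf_defrag_ipv4.c. All "frag_user" names are made up
 * for the example.
 */
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>

static int __net_init frag_user_net_init(struct net *net)
{
	/* Registers the PRE_ROUTING/LOCAL_OUT defrag hooks on first use. */
	return nf_defrag_ipv4_enable(net);
}

static void __net_exit frag_user_net_exit(struct net *net)
{
	/* Drops our reference; the hooks go away when the count reaches 0. */
	nf_defrag_ipv4_disable(net);
}

static struct pernet_operations frag_user_net_ops = {
	.init = frag_user_net_init,
	.exit = frag_user_net_exit,
};

static int __init frag_user_init(void)
{
	return register_pernet_subsys(&frag_user_net_ops);
}

static void __exit frag_user_exit(void)
{
	unregister_pernet_subsys(&frag_user_net_ops);
}

module_init(frag_user_init);
module_exit(frag_user_exit);
MODULE_LICENSE("GPL");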
// SPDX-License-Identifier: GPL-2.0-only
/* iptables module for the IPv4 and TCP ECN bits, Version 1.5
*
* (C) 2002 by Harald Welte <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_ECN.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag modification");
/* set ECT codepoint from IP header.
* return false if there was an error. */
static inline bool
set_ect_ip(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
{
struct iphdr *iph = ip_hdr(skb);
if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) {
__u8 oldtos;
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
return false;
iph = ip_hdr(skb);
oldtos = iph->tos;
iph->tos &= ~IPT_ECN_IP_MASK;
iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK);
csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
}
return true;
}
/* Return false if there was an error. */
static inline bool
set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
{
struct tcphdr _tcph, *tcph;
__be16 oldval;
/* Not enough header? */
tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
if (!tcph)
return false;
if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) ||
tcph->ece == einfo->proto.tcp.ece) &&
(!(einfo->operation & IPT_ECN_OP_SET_CWR) ||
tcph->cwr == einfo->proto.tcp.cwr))
return true;
if (skb_ensure_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
return false;
tcph = (void *)ip_hdr(skb) + ip_hdrlen(skb);
oldval = ((__be16 *)tcph)[6];
if (einfo->operation & IPT_ECN_OP_SET_ECE)
tcph->ece = einfo->proto.tcp.ece;
if (einfo->operation & IPT_ECN_OP_SET_CWR)
tcph->cwr = einfo->proto.tcp.cwr;
inet_proto_csum_replace2(&tcph->check, skb,
oldval, ((__be16 *)tcph)[6], false);
return true;
}
static unsigned int
ecn_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_ECN_info *einfo = par->targinfo;
if (einfo->operation & IPT_ECN_OP_SET_IP)
if (!set_ect_ip(skb, einfo))
return NF_DROP;
if (einfo->operation & (IPT_ECN_OP_SET_ECE | IPT_ECN_OP_SET_CWR) &&
ip_hdr(skb)->protocol == IPPROTO_TCP)
if (!set_ect_tcp(skb, einfo))
return NF_DROP;
return XT_CONTINUE;
}
static int ecn_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_ECN_info *einfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
if (einfo->operation & IPT_ECN_OP_MASK)
return -EINVAL;
if (einfo->ip_ect & ~IPT_ECN_IP_MASK)
return -EINVAL;
if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) &&
(e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
pr_info_ratelimited("cannot use operation on non-tcp rule\n");
return -EINVAL;
}
return 0;
}
static struct xt_target ecn_tg_reg __read_mostly = {
.name = "ECN",
.family = NFPROTO_IPV4,
.target = ecn_tg,
.targetsize = sizeof(struct ipt_ECN_info),
.table = "mangle",
.checkentry = ecn_tg_check,
.me = THIS_MODULE,
};
static int __init ecn_tg_init(void)
{
return xt_register_target(&ecn_tg_reg);
}
static void __exit ecn_tg_exit(void)
{
xt_unregister_target(&ecn_tg_reg);
}
module_init(ecn_tg_init);
module_exit(ecn_tg_exit);
| linux-master | net/ipv4/netfilter/ipt_ECN.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match AH parameters. */
/* (C) 1999-2000 Yon Uriarte <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/netfilter_ipv4/ipt_ah.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yon Uriarte <[email protected]>");
MODULE_DESCRIPTION("Xtables: IPv4 IPsec-AH SPI match");
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
static bool ah_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
const struct ipt_ah *ahinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
ah = skb_header_pointer(skb, par->thoff, sizeof(_ahdr), &_ahdr);
if (ah == NULL) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
pr_debug("Dropping evil AH tinygram.\n");
par->hotdrop = true;
return false;
}
return spi_match(ahinfo->spis[0], ahinfo->spis[1],
ntohl(ah->spi),
!!(ahinfo->invflags & IPT_AH_INV_SPI));
}
static int ah_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ah *ahinfo = par->matchinfo;
/* Must specify no unknown invflags */
if (ahinfo->invflags & ~IPT_AH_INV_MASK) {
pr_debug("unknown flags %X\n", ahinfo->invflags);
return -EINVAL;
}
return 0;
}
static struct xt_match ah_mt_reg __read_mostly = {
.name = "ah",
.family = NFPROTO_IPV4,
.match = ah_mt,
.matchsize = sizeof(struct ipt_ah),
.proto = IPPROTO_AH,
.checkentry = ah_mt_check,
.me = THIS_MODULE,
};
static int __init ah_mt_init(void)
{
return xt_register_match(&ah_mt_reg);
}
static void __exit ah_mt_exit(void)
{
xt_unregister_match(&ah_mt_reg);
}
module_init(ah_mt_init);
module_exit(ah_mt_exit);
| linux-master | net/ipv4/netfilter/ipt_ah.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011 Florian Westphal <[email protected]>
*
* based on fib_frontend.c; Author: Alexey Kuznetsov, <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/route.h>
#include <linux/netfilter/xt_rpfilter.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_DESCRIPTION("iptables: ipv4 reverse path filter match");
/* don't try to find route from mcast/bcast/zeronet */
static __be32 rpfilter_get_saddr(__be32 addr)
{
if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
ipv4_is_zeronet(addr))
return 0;
return addr;
}
static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
const struct net_device *dev, u8 flags)
{
struct fib_result res;
if (fib_lookup(net, fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE))
return false;
if (res.type != RTN_UNICAST) {
if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL))
return false;
}
return fib_info_nh_uses_dev(res.fi, dev) || flags & XT_RPFILTER_LOOSE;
}
static bool
rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
{
return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rpfilter_info *info;
const struct iphdr *iph;
struct flowi4 flow;
bool invert;
info = par->matchinfo;
invert = info->flags & XT_RPFILTER_INVERT;
if (rpfilter_is_loopback(skb, xt_in(par)))
return true ^ invert;
iph = ip_hdr(skb);
if (ipv4_is_zeronet(iph->saddr)) {
if (ipv4_is_lbcast(iph->daddr) ||
ipv4_is_local_multicast(iph->daddr))
return true ^ invert;
}
memset(&flow, 0, sizeof(flow));
flow.flowi4_iif = LOOPBACK_IFINDEX;
flow.daddr = iph->saddr;
flow.saddr = rpfilter_get_saddr(iph->daddr);
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par));
flow.flowi4_uid = sock_net_uid(xt_net(par), NULL);
return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
}
static int rpfilter_check(const struct xt_mtchk_param *par)
{
const struct xt_rpfilter_info *info = par->matchinfo;
unsigned int options = ~XT_RPFILTER_OPTION_MASK;
if (info->flags & options) {
pr_info_ratelimited("unknown options\n");
return -EINVAL;
}
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "raw") != 0) {
pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
par->table);
return -EINVAL;
}
return 0;
}
static struct xt_match rpfilter_mt_reg __read_mostly = {
.name = "rpfilter",
.family = NFPROTO_IPV4,
.checkentry = rpfilter_check,
.match = rpfilter_mt,
.matchsize = sizeof(struct xt_rpfilter_info),
.hooks = (1 << NF_INET_PRE_ROUTING),
.me = THIS_MODULE
};
static int __init rpfilter_mt_init(void)
{
return xt_register_match(&rpfilter_mt_reg);
}
static void __exit rpfilter_mt_exit(void)
{
xt_unregister_match(&rpfilter_mt_reg);
}
module_init(rpfilter_mt_init);
module_exit(rpfilter_mt_exit);
| linux-master | net/ipv4/netfilter/ipt_rpfilter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x.
*
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2004 Netfilter Core Team <[email protected]>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/slab.h>
#include <net/ip.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <[email protected]>");
MODULE_DESCRIPTION("iptables filter table");
#define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT))
static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
.priority = NF_IP_PRI_FILTER,
};
static struct nf_hook_ops *filter_ops __read_mostly;
/* Default to forward because I got too much mail already. */
static bool forward __read_mostly = true;
module_param(forward, bool, 0000);
static int iptable_filter_table_init(struct net *net)
{
struct ipt_replace *repl;
int err;
repl = ipt_alloc_initial_table(&packet_filter);
if (repl == NULL)
return -ENOMEM;
/* Entry 1 is the FORWARD hook */
((struct ipt_standard *)repl->entries)[1].target.verdict =
forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;
err = ipt_register_table(net, &packet_filter, repl, filter_ops);
kfree(repl);
return err;
}
static int __net_init iptable_filter_net_init(struct net *net)
{
if (!forward)
return iptable_filter_table_init(net);
return 0;
}
static void __net_exit iptable_filter_net_pre_exit(struct net *net)
{
ipt_unregister_table_pre_exit(net, "filter");
}
static void __net_exit iptable_filter_net_exit(struct net *net)
{
ipt_unregister_table_exit(net, "filter");
}
static struct pernet_operations iptable_filter_net_ops = {
.init = iptable_filter_net_init,
.pre_exit = iptable_filter_net_pre_exit,
.exit = iptable_filter_net_exit,
};
static int __init iptable_filter_init(void)
{
int ret = xt_register_template(&packet_filter,
iptable_filter_table_init);
if (ret < 0)
return ret;
filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
if (IS_ERR(filter_ops)) {
xt_unregister_template(&packet_filter);
return PTR_ERR(filter_ops);
}
ret = register_pernet_subsys(&iptable_filter_net_ops);
if (ret < 0) {
xt_unregister_template(&packet_filter);
kfree(filter_ops);
return ret;
}
return 0;
}
static void __exit iptable_filter_fini(void)
{
unregister_pernet_subsys(&iptable_filter_net_ops);
xt_unregister_template(&packet_filter);
kfree(filter_ops);
}
module_init(iptable_filter_init);
module_exit(iptable_filter_fini);
| linux-master | net/ipv4/netfilter/iptable_filter.c |
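/*
 * Illustrative sketch, not part of iptable_filter.c above: the "forward"
 * module parameter works by patching the FORWARD chain policy in the
 * initial table, using the xt standard-target encoding in which a netfilter
 * verdict V is stored as -V - 1 (so NF_ACCEPT becomes -2 and NF_DROP
 * becomes -1). The helper names below are made up for the example.
 */
#include <linux/netfilter.h>

static inline int ipt_std_verdict_encode(int nf_verdict)
{
	return -nf_verdict - 1;	/* NF_ACCEPT(1) -> -2, NF_DROP(0) -> -1 */
}

static inline int ipt_std_verdict_decode(int stored)
{
	return -stored - 1;	/* inverse of the encoding above */
}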
// SPDX-License-Identifier: GPL-2.0-only
/*
* "security" table
*
* This is for use by Mandatory Access Control (MAC) security models,
* which need to be able to manage security policy in separate context
* to DAC.
*
* Based on iptable_mangle.c
*
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org>
* Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com>
*/
#include <linux/module.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/slab.h>
#include <net/ip.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
MODULE_DESCRIPTION("iptables security table, for MAC rules");
#define SECURITY_VALID_HOOKS	((1 << NF_INET_LOCAL_IN) | \
				 (1 << NF_INET_FORWARD) | \
				 (1 << NF_INET_LOCAL_OUT))
static const struct xt_table security_table = {
.name = "security",
.valid_hooks = SECURITY_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
.priority = NF_IP_PRI_SECURITY,
};
static struct nf_hook_ops *sectbl_ops __read_mostly;
static int iptable_security_table_init(struct net *net)
{
struct ipt_replace *repl;
int ret;
repl = ipt_alloc_initial_table(&security_table);
if (repl == NULL)
return -ENOMEM;
ret = ipt_register_table(net, &security_table, repl, sectbl_ops);
kfree(repl);
return ret;
}
static void __net_exit iptable_security_net_pre_exit(struct net *net)
{
ipt_unregister_table_pre_exit(net, "security");
}
static void __net_exit iptable_security_net_exit(struct net *net)
{
ipt_unregister_table_exit(net, "security");
}
static struct pernet_operations iptable_security_net_ops = {
.pre_exit = iptable_security_net_pre_exit,
.exit = iptable_security_net_exit,
};
static int __init iptable_security_init(void)
{
int ret = xt_register_template(&security_table,
iptable_security_table_init);
if (ret < 0)
return ret;
sectbl_ops = xt_hook_ops_alloc(&security_table, ipt_do_table);
if (IS_ERR(sectbl_ops)) {
xt_unregister_template(&security_table);
return PTR_ERR(sectbl_ops);
}
ret = register_pernet_subsys(&iptable_security_net_ops);
if (ret < 0) {
xt_unregister_template(&security_table);
kfree(sectbl_ops);
return ret;
}
return ret;
}
static void __exit iptable_security_fini(void)
{
unregister_pernet_subsys(&iptable_security_net_ops);
kfree(sectbl_ops);
xt_unregister_template(&security_table);
}
module_init(iptable_security_init);
module_exit(iptable_security_fini);
| linux-master | net/ipv4/netfilter/iptable_security.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 Patrick McHardy <[email protected]>
*/
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_SYNPROXY.h>
#include <net/netfilter/nf_synproxy.h>
static unsigned int
synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_synproxy_info *info = par->targinfo;
struct net *net = xt_net(par);
struct synproxy_net *snet = synproxy_pernet(net);
struct synproxy_options opts = {};
struct tcphdr *th, _th;
if (nf_ip_checksum(skb, xt_hooknum(par), par->thoff, IPPROTO_TCP))
return NF_DROP;
th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
if (th == NULL)
return NF_DROP;
if (!synproxy_parse_options(skb, par->thoff, th, &opts))
return NF_DROP;
if (th->syn && !(th->ack || th->fin || th->rst)) {
/* Initial SYN from client */
this_cpu_inc(snet->stats->syn_received);
if (th->ece && th->cwr)
opts.options |= XT_SYNPROXY_OPT_ECN;
opts.options &= info->options;
opts.mss_encode = opts.mss_option;
opts.mss_option = info->mss;
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy_init_timestamp_cookie(info, &opts);
else
opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
XT_SYNPROXY_OPT_SACK_PERM |
XT_SYNPROXY_OPT_ECN);
synproxy_send_client_synack(net, skb, th, &opts);
consume_skb(skb);
return NF_STOLEN;
} else if (th->ack && !(th->fin || th->rst || th->syn)) {
/* ACK from client */
if (synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq))) {
consume_skb(skb);
return NF_STOLEN;
} else {
return NF_DROP;
}
}
return XT_CONTINUE;
}
static int synproxy_tg4_check(const struct xt_tgchk_param *par)
{
struct synproxy_net *snet = synproxy_pernet(par->net);
const struct ipt_entry *e = par->entryinfo;
int err;
if (e->ip.proto != IPPROTO_TCP ||
e->ip.invflags & XT_INV_PROTO)
return -EINVAL;
err = nf_ct_netns_get(par->net, par->family);
if (err)
return err;
err = nf_synproxy_ipv4_init(snet, par->net);
if (err) {
nf_ct_netns_put(par->net, par->family);
return err;
}
return err;
}
static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
{
struct synproxy_net *snet = synproxy_pernet(par->net);
nf_synproxy_ipv4_fini(snet, par->net);
nf_ct_netns_put(par->net, par->family);
}
static struct xt_target synproxy_tg4_reg __read_mostly = {
.name = "SYNPROXY",
.family = NFPROTO_IPV4,
.hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
.target = synproxy_tg4,
.targetsize = sizeof(struct xt_synproxy_info),
.checkentry = synproxy_tg4_check,
.destroy = synproxy_tg4_destroy,
.me = THIS_MODULE,
};
static int __init synproxy_tg4_init(void)
{
return xt_register_target(&synproxy_tg4_reg);
}
static void __exit synproxy_tg4_exit(void)
{
xt_unregister_target(&synproxy_tg4_reg);
}
module_init(synproxy_tg4_init);
module_exit(synproxy_tg4_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("Intercept TCP connections and establish them using syncookies");
| linux-master | net/ipv4/netfilter/ipt_SYNPROXY.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/ipv4/nf_dup_ipv4.h>
struct nft_dup_ipv4 {
u8 sreg_addr;
u8 sreg_dev;
};
static void nft_dup_ipv4_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
struct in_addr gw = {
.s_addr = (__force __be32)regs->data[priv->sreg_addr],
};
int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
nf_dup_ipv4(nft_net(pkt), pkt->skb, nft_hook(pkt), &gw, oif);
}
static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
int err;
if (tb[NFTA_DUP_SREG_ADDR] == NULL)
return -EINVAL;
err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
sizeof(struct in_addr));
if (err < 0)
return err;
if (tb[NFTA_DUP_SREG_DEV])
err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
&priv->sreg_dev, sizeof(int));
return err;
}
static int nft_dup_ipv4_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
goto nla_put_failure;
if (priv->sreg_dev &&
nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static struct nft_expr_type nft_dup_ipv4_type;
static const struct nft_expr_ops nft_dup_ipv4_ops = {
.type = &nft_dup_ipv4_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv4)),
.eval = nft_dup_ipv4_eval,
.init = nft_dup_ipv4_init,
.dump = nft_dup_ipv4_dump,
.reduce = NFT_REDUCE_READONLY,
};
static const struct nla_policy nft_dup_ipv4_policy[NFTA_DUP_MAX + 1] = {
[NFTA_DUP_SREG_ADDR] = { .type = NLA_U32 },
[NFTA_DUP_SREG_DEV] = { .type = NLA_U32 },
};
static struct nft_expr_type nft_dup_ipv4_type __read_mostly = {
.family = NFPROTO_IPV4,
.name = "dup",
.ops = &nft_dup_ipv4_ops,
.policy = nft_dup_ipv4_policy,
.maxattr = NFTA_DUP_MAX,
.owner = THIS_MODULE,
};
static int __init nft_dup_ipv4_module_init(void)
{
return nft_register_expr(&nft_dup_ipv4_type);
}
static void __exit nft_dup_ipv4_module_exit(void)
{
nft_unregister_expr(&nft_dup_ipv4_type);
}
module_init(nft_dup_ipv4_module_init);
module_exit(nft_dup_ipv4_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
MODULE_DESCRIPTION("IPv4 nftables packet duplication support");
| linux-master | net/ipv4/netfilter/nft_dup_ipv4.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bpfilter.h>
#include <uapi/linux/bpf.h>
#include <linux/wait.h>
#include <linux/kmod.h>
#include <linux/fs.h>
#include <linux/file.h>
struct bpfilter_umh_ops bpfilter_ops;
EXPORT_SYMBOL_GPL(bpfilter_ops);
static int bpfilter_mbox_request(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen, bool is_set)
{
int err;
mutex_lock(&bpfilter_ops.lock);
if (!bpfilter_ops.sockopt) {
mutex_unlock(&bpfilter_ops.lock);
request_module("bpfilter");
mutex_lock(&bpfilter_ops.lock);
if (!bpfilter_ops.sockopt) {
err = -ENOPROTOOPT;
goto out;
}
}
if (bpfilter_ops.info.tgid &&
thread_group_exited(bpfilter_ops.info.tgid))
umd_cleanup_helper(&bpfilter_ops.info);
if (!bpfilter_ops.info.tgid) {
err = bpfilter_ops.start();
if (err)
goto out;
}
err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set);
out:
mutex_unlock(&bpfilter_ops.lock);
return err;
}
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen)
{
return bpfilter_mbox_request(sk, optname, optval, optlen, true);
}
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
int __user *optlen)
{
int len;
if (get_user(len, optlen))
return -EFAULT;
return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len,
false);
}
static int __init bpfilter_sockopt_init(void)
{
mutex_init(&bpfilter_ops.lock);
bpfilter_ops.info.tgid = NULL;
bpfilter_ops.info.driver_name = "bpfilter_umh";
return 0;
}
device_initcall(bpfilter_sockopt_init);
| linux-master | net/ipv4/bpfilter/sockopt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2006 - 2007 Ivo van Doorn
* Copyright (C) 2007 Dmitry Torokhov
* Copyright 2009 Johannes Berg <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include "rfkill.h"
#define POLL_INTERVAL (5 * HZ)
#define RFKILL_BLOCK_HW BIT(0)
#define RFKILL_BLOCK_SW BIT(1)
#define RFKILL_BLOCK_SW_PREV BIT(2)
#define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\
RFKILL_BLOCK_SW |\
RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL BIT(31)
struct rfkill {
spinlock_t lock;
enum rfkill_type type;
unsigned long state;
unsigned long hard_block_reasons;
u32 idx;
bool registered;
bool persistent;
bool polling_paused;
bool suspended;
const struct rfkill_ops *ops;
void *data;
#ifdef CONFIG_RFKILL_LEDS
struct led_trigger led_trigger;
const char *ledtrigname;
#endif
struct device dev;
struct list_head node;
struct delayed_work poll_work;
struct work_struct uevent_work;
struct work_struct sync_work;
char name[];
};
#define to_rfkill(d) container_of(d, struct rfkill, dev)
struct rfkill_int_event {
struct list_head list;
struct rfkill_event_ext ev;
};
struct rfkill_data {
struct list_head list;
struct list_head events;
struct mutex mtx;
wait_queue_head_t read_wait;
bool input_handler;
u8 max_size;
};
MODULE_AUTHOR("Ivo van Doorn <[email protected]>");
MODULE_AUTHOR("Johannes Berg <[email protected]>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");
/*
* The locking here should be made much smarter, we currently have
* a bit of a stupid situation because drivers might want to register
* the rfkill struct under their own lock, and take this lock during
* rfkill method calls -- which will cause an AB-BA deadlock situation.
*
* To fix that, we need to rework this code here to be mostly lock-free
* and only use the mutex for list manipulations, not to protect the
* various other global variables. Then we can avoid holding the mutex
* around driver operations, and all is happy.
*/
static LIST_HEAD(rfkill_list); /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
"Default initial state for all radio types, 0 = radio off");
static struct {
bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];
static bool rfkill_epo_lock_active;
#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
struct led_trigger *trigger;
if (!rfkill->registered)
return;
trigger = &rfkill->led_trigger;
if (rfkill->state & RFKILL_BLOCK_ANY)
led_trigger_event(trigger, LED_OFF);
else
led_trigger_event(trigger, LED_FULL);
}
static int rfkill_led_trigger_activate(struct led_classdev *led)
{
struct rfkill *rfkill;
rfkill = container_of(led->trigger, struct rfkill, led_trigger);
rfkill_led_trigger_event(rfkill);
return 0;
}
const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);
void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
BUG_ON(!rfkill);
rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);
static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
rfkill->led_trigger.name = rfkill->ledtrigname
? : dev_name(&rfkill->dev);
rfkill->led_trigger.activate = rfkill_led_trigger_activate;
return led_trigger_register(&rfkill->led_trigger);
}
static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
led_trigger_unregister(&rfkill->led_trigger);
}
static struct led_trigger rfkill_any_led_trigger;
static struct led_trigger rfkill_none_led_trigger;
static struct work_struct rfkill_global_led_trigger_work;
static void rfkill_global_led_trigger_worker(struct work_struct *work)
{
enum led_brightness brightness = LED_OFF;
struct rfkill *rfkill;
mutex_lock(&rfkill_global_mutex);
list_for_each_entry(rfkill, &rfkill_list, node) {
if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
brightness = LED_FULL;
break;
}
}
mutex_unlock(&rfkill_global_mutex);
led_trigger_event(&rfkill_any_led_trigger, brightness);
led_trigger_event(&rfkill_none_led_trigger,
brightness == LED_OFF ? LED_FULL : LED_OFF);
}
static void rfkill_global_led_trigger_event(void)
{
schedule_work(&rfkill_global_led_trigger_work);
}
static int rfkill_global_led_trigger_register(void)
{
int ret;
INIT_WORK(&rfkill_global_led_trigger_work,
rfkill_global_led_trigger_worker);
rfkill_any_led_trigger.name = "rfkill-any";
ret = led_trigger_register(&rfkill_any_led_trigger);
if (ret)
return ret;
rfkill_none_led_trigger.name = "rfkill-none";
ret = led_trigger_register(&rfkill_none_led_trigger);
if (ret)
led_trigger_unregister(&rfkill_any_led_trigger);
else
/* Delay activation until all global triggers are registered */
rfkill_global_led_trigger_event();
return ret;
}
static void rfkill_global_led_trigger_unregister(void)
{
led_trigger_unregister(&rfkill_none_led_trigger);
led_trigger_unregister(&rfkill_any_led_trigger);
cancel_work_sync(&rfkill_global_led_trigger_work);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}
static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
return 0;
}
static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
static void rfkill_global_led_trigger_event(void)
{
}
static int rfkill_global_led_trigger_register(void)
{
return 0;
}
static void rfkill_global_led_trigger_unregister(void)
{
}
#endif /* CONFIG_RFKILL_LEDS */
static void rfkill_fill_event(struct rfkill_event_ext *ev,
struct rfkill *rfkill,
enum rfkill_operation op)
{
unsigned long flags;
ev->idx = rfkill->idx;
ev->type = rfkill->type;
ev->op = op;
spin_lock_irqsave(&rfkill->lock, flags);
ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
RFKILL_BLOCK_SW_PREV));
ev->hard_block_reasons = rfkill->hard_block_reasons;
spin_unlock_irqrestore(&rfkill->lock, flags);
}
static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
struct rfkill_data *data;
struct rfkill_int_event *ev;
list_for_each_entry(data, &rfkill_fds, list) {
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
continue;
rfkill_fill_event(&ev->ev, rfkill, op);
mutex_lock(&data->mtx);
list_add_tail(&ev->list, &data->events);
mutex_unlock(&data->mtx);
wake_up_interruptible(&data->read_wait);
}
}
static void rfkill_event(struct rfkill *rfkill)
{
if (!rfkill->registered)
return;
kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
/* also send event to /dev/rfkill */
rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}
/**
* rfkill_set_block - wrapper for set_block method
*
* @rfkill: the rfkill struct to use
* @blocked: the new software state
*
* Calls the set_block method (when applicable) and handles notifications
* etc. as well.
*/
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
unsigned long flags;
bool prev, curr;
int err;
if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
return;
/*
* Some platforms (...!) generate input events which affect the
* _hard_ kill state -- whenever something tries to change the
* current software state query the hardware state too.
*/
if (rfkill->ops->query)
rfkill->ops->query(rfkill, rfkill->data);
spin_lock_irqsave(&rfkill->lock, flags);
prev = rfkill->state & RFKILL_BLOCK_SW;
if (prev)
rfkill->state |= RFKILL_BLOCK_SW_PREV;
else
rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
if (blocked)
rfkill->state |= RFKILL_BLOCK_SW;
else
rfkill->state &= ~RFKILL_BLOCK_SW;
rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
spin_unlock_irqrestore(&rfkill->lock, flags);
err = rfkill->ops->set_block(rfkill->data, blocked);
spin_lock_irqsave(&rfkill->lock, flags);
if (err) {
/*
* Failed -- reset status to _PREV, which may be different
* from what we have set _PREV to earlier in this function
* if rfkill_set_sw_state was invoked.
*/
if (rfkill->state & RFKILL_BLOCK_SW_PREV)
rfkill->state |= RFKILL_BLOCK_SW;
else
rfkill->state &= ~RFKILL_BLOCK_SW;
}
rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
curr = rfkill->state & RFKILL_BLOCK_SW;
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
rfkill_global_led_trigger_event();
if (prev != curr)
rfkill_event(rfkill);
}
static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
int i;
if (type != RFKILL_TYPE_ALL) {
rfkill_global_states[type].cur = blocked;
return;
}
for (i = 0; i < NUM_RFKILL_TYPES; i++)
rfkill_global_states[i].cur = blocked;
}
#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
/**
* __rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
* @blocked: the new state
*
* This function sets the state of all switches of given type,
* unless a specific switch is suspended.
*
* Caller must have acquired rfkill_global_mutex.
*/
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
struct rfkill *rfkill;
rfkill_update_global_state(type, blocked);
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->type != type && type != RFKILL_TYPE_ALL)
continue;
rfkill_set_block(rfkill, blocked);
}
}
/**
* rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
* @blocked: the new state
*
* Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
* Please refer to __rfkill_switch_all() for details.
*
* Does nothing if the EPO lock is active.
*/
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
if (atomic_read(&rfkill_input_disabled))
return;
mutex_lock(&rfkill_global_mutex);
if (!rfkill_epo_lock_active)
__rfkill_switch_all(type, blocked);
mutex_unlock(&rfkill_global_mutex);
}
/**
* rfkill_epo - emergency power off all transmitters
*
* This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
* ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
*
* The global state before the EPO is saved and can be restored later
* using rfkill_restore_states().
*/
void rfkill_epo(void)
{
struct rfkill *rfkill;
int i;
if (atomic_read(&rfkill_input_disabled))
return;
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = true;
list_for_each_entry(rfkill, &rfkill_list, node)
rfkill_set_block(rfkill, true);
for (i = 0; i < NUM_RFKILL_TYPES; i++) {
rfkill_global_states[i].sav = rfkill_global_states[i].cur;
rfkill_global_states[i].cur = true;
}
mutex_unlock(&rfkill_global_mutex);
}
/**
* rfkill_restore_states - restore global states
*
 * Restore (and sync switches to) the global states saved by
 * rfkill_epo() in rfkill_global_states[]. This can undo the effects
 * of a call to rfkill_epo().
*/
void rfkill_restore_states(void)
{
int i;
if (atomic_read(&rfkill_input_disabled))
return;
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = false;
for (i = 0; i < NUM_RFKILL_TYPES; i++)
__rfkill_switch_all(i, rfkill_global_states[i].sav);
mutex_unlock(&rfkill_global_mutex);
}
/**
* rfkill_remove_epo_lock - unlock state changes
*
 * Used by rfkill-input to manually unlock state changes, when
* the EPO switch is deactivated.
*/
void rfkill_remove_epo_lock(void)
{
if (atomic_read(&rfkill_input_disabled))
return;
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = false;
mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
*
* Returns 0 (false) if there is NOT an active EPO condition,
* and 1 (true) if there is an active EPO condition, which
* locks all radios in one of the BLOCKED states.
*
* Can be called in atomic context.
*/
bool rfkill_is_epo_lock_active(void)
{
return rfkill_epo_lock_active;
}
/**
* rfkill_get_global_sw_state - returns global state for a type
* @type: the type to get the global state of
*
* Returns the current global state for a given wireless
* device type.
*/
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
return rfkill_global_states[type].cur;
}
#endif
bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
bool blocked, unsigned long reason)
{
unsigned long flags;
bool ret, prev;
BUG_ON(!rfkill);
if (WARN(reason &
~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
"hw_state reason not supported: 0x%lx", reason))
return blocked;
spin_lock_irqsave(&rfkill->lock, flags);
prev = !!(rfkill->hard_block_reasons & reason);
if (blocked) {
rfkill->state |= RFKILL_BLOCK_HW;
rfkill->hard_block_reasons |= reason;
} else {
rfkill->hard_block_reasons &= ~reason;
if (!rfkill->hard_block_reasons)
rfkill->state &= ~RFKILL_BLOCK_HW;
}
ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
rfkill_global_led_trigger_event();
if (rfkill->registered && prev != blocked)
schedule_work(&rfkill->uevent_work);
return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state_reason);
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
u32 bit = RFKILL_BLOCK_SW;
	/* if in an ops->set_block call right now, use the other bit */
if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
bit = RFKILL_BLOCK_SW_PREV;
if (blocked)
rfkill->state |= bit;
else
rfkill->state &= ~bit;
}
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
unsigned long flags;
bool prev, hwblock;
BUG_ON(!rfkill);
spin_lock_irqsave(&rfkill->lock, flags);
prev = !!(rfkill->state & RFKILL_BLOCK_SW);
__rfkill_set_sw_state(rfkill, blocked);
hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
blocked = blocked || hwblock;
spin_unlock_irqrestore(&rfkill->lock, flags);
if (!rfkill->registered)
return blocked;
if (prev != blocked && !hwblock)
schedule_work(&rfkill->uevent_work);
rfkill_led_trigger_event(rfkill);
rfkill_global_led_trigger_event();
return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
unsigned long flags;
BUG_ON(!rfkill);
BUG_ON(rfkill->registered);
spin_lock_irqsave(&rfkill->lock, flags);
__rfkill_set_sw_state(rfkill, blocked);
rfkill->persistent = true;
spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
unsigned long flags;
bool swprev, hwprev;
BUG_ON(!rfkill);
spin_lock_irqsave(&rfkill->lock, flags);
/*
* No need to care about prev/setblock ... this is for uevent only
* and that will get triggered by rfkill_set_block anyway.
*/
swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
__rfkill_set_sw_state(rfkill, sw);
if (hw)
rfkill->state |= RFKILL_BLOCK_HW;
else
rfkill->state &= ~RFKILL_BLOCK_HW;
spin_unlock_irqrestore(&rfkill->lock, flags);
if (!rfkill->registered) {
rfkill->persistent = true;
} else {
if (swprev != sw || hwprev != hw)
schedule_work(&rfkill->uevent_work);
rfkill_led_trigger_event(rfkill);
rfkill_global_led_trigger_event();
}
}
EXPORT_SYMBOL(rfkill_set_states);
static const char * const rfkill_types[] = {
NULL, /* RFKILL_TYPE_ALL */
"wlan",
"bluetooth",
"ultrawideband",
"wimax",
"wwan",
"gps",
"fm",
"nfc",
};
enum rfkill_type rfkill_find_type(const char *name)
{
int i;
BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);
if (!name)
return RFKILL_TYPE_ALL;
for (i = 1; i < NUM_RFKILL_TYPES; i++)
if (!strcmp(name, rfkill_types[i]))
return i;
return RFKILL_TYPE_ALL;
}
EXPORT_SYMBOL(rfkill_find_type);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sysfs_emit(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sysfs_emit(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);
static ssize_t index_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sysfs_emit(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);
static ssize_t persistent_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sysfs_emit(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);
static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);
static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}
static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long state;
int err;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = kstrtoul(buf, 0, &state);
if (err)
return err;
	if (state > 1)
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
rfkill_set_block(rfkill, state);
mutex_unlock(&rfkill_global_mutex);
return count;
}
static DEVICE_ATTR_RW(soft);
static ssize_t hard_block_reasons_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sysfs_emit(buf, "0x%lx\n", rfkill->hard_block_reasons);
}
static DEVICE_ATTR_RO(hard_block_reasons);
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
return RFKILL_USER_STATE_HARD_BLOCKED;
if (state & RFKILL_BLOCK_SW)
return RFKILL_USER_STATE_SOFT_BLOCKED;
return RFKILL_USER_STATE_UNBLOCKED;
}
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long state;
int err;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = kstrtoul(buf, 0, &state);
if (err)
return err;
if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
state != RFKILL_USER_STATE_UNBLOCKED)
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
mutex_unlock(&rfkill_global_mutex);
return count;
}
static DEVICE_ATTR_RW(state);
static struct attribute *rfkill_dev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_type.attr,
&dev_attr_index.attr,
&dev_attr_persistent.attr,
&dev_attr_state.attr,
&dev_attr_soft.attr,
&dev_attr_hard.attr,
&dev_attr_hard_block_reasons.attr,
NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);
static void rfkill_release(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
kfree(rfkill);
}
static int rfkill_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long flags;
unsigned long reasons;
u32 state;
int error;
error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
if (error)
return error;
error = add_uevent_var(env, "RFKILL_TYPE=%s",
rfkill_types[rfkill->type]);
if (error)
return error;
spin_lock_irqsave(&rfkill->lock, flags);
state = rfkill->state;
reasons = rfkill->hard_block_reasons;
spin_unlock_irqrestore(&rfkill->lock, flags);
error = add_uevent_var(env, "RFKILL_STATE=%d",
user_state_from_blocked(state));
if (error)
return error;
return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons);
}
void rfkill_pause_polling(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
if (!rfkill->ops->poll)
return;
rfkill->polling_paused = true;
cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);
void rfkill_resume_polling(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
if (!rfkill->ops->poll)
return;
rfkill->polling_paused = false;
if (rfkill->suspended)
return;
queue_delayed_work(system_power_efficient_wq,
&rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);
#ifdef CONFIG_PM_SLEEP
static int rfkill_suspend(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
rfkill->suspended = true;
cancel_delayed_work_sync(&rfkill->poll_work);
return 0;
}
static int rfkill_resume(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
bool cur;
rfkill->suspended = false;
if (!rfkill->registered)
return 0;
if (!rfkill->persistent) {
cur = !!(rfkill->state & RFKILL_BLOCK_SW);
rfkill_set_block(rfkill, cur);
}
if (rfkill->ops->poll && !rfkill->polling_paused)
queue_delayed_work(system_power_efficient_wq,
&rfkill->poll_work, 0);
return 0;
}
static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
#define RFKILL_PM_OPS (&rfkill_pm_ops)
#else
#define RFKILL_PM_OPS NULL
#endif
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
.dev_groups = rfkill_dev_groups,
.dev_uevent = rfkill_dev_uevent,
.pm = RFKILL_PM_OPS,
};
bool rfkill_blocked(struct rfkill *rfkill)
{
unsigned long flags;
u32 state;
spin_lock_irqsave(&rfkill->lock, flags);
state = rfkill->state;
spin_unlock_irqrestore(&rfkill->lock, flags);
return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);
bool rfkill_soft_blocked(struct rfkill *rfkill)
{
unsigned long flags;
u32 state;
spin_lock_irqsave(&rfkill->lock, flags);
state = rfkill->state;
spin_unlock_irqrestore(&rfkill->lock, flags);
return !!(state & RFKILL_BLOCK_SW);
}
EXPORT_SYMBOL(rfkill_soft_blocked);
struct rfkill * __must_check rfkill_alloc(const char *name,
struct device *parent,
const enum rfkill_type type,
const struct rfkill_ops *ops,
void *ops_data)
{
struct rfkill *rfkill;
struct device *dev;
if (WARN_ON(!ops))
return NULL;
if (WARN_ON(!ops->set_block))
return NULL;
if (WARN_ON(!name))
return NULL;
if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
return NULL;
rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
if (!rfkill)
return NULL;
spin_lock_init(&rfkill->lock);
INIT_LIST_HEAD(&rfkill->node);
rfkill->type = type;
strcpy(rfkill->name, name);
rfkill->ops = ops;
rfkill->data = ops_data;
dev = &rfkill->dev;
dev->class = &rfkill_class;
dev->parent = parent;
device_initialize(dev);
return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
static void rfkill_poll(struct work_struct *work)
{
struct rfkill *rfkill;
rfkill = container_of(work, struct rfkill, poll_work.work);
/*
* Poll hardware state -- driver will use one of the
* rfkill_set{,_hw,_sw}_state functions and use its
* return value to update the current status.
*/
rfkill->ops->poll(rfkill, rfkill->data);
queue_delayed_work(system_power_efficient_wq,
&rfkill->poll_work,
round_jiffies_relative(POLL_INTERVAL));
}
static void rfkill_uevent_work(struct work_struct *work)
{
struct rfkill *rfkill;
rfkill = container_of(work, struct rfkill, uevent_work);
mutex_lock(&rfkill_global_mutex);
rfkill_event(rfkill);
mutex_unlock(&rfkill_global_mutex);
}
static void rfkill_sync_work(struct work_struct *work)
{
struct rfkill *rfkill;
bool cur;
rfkill = container_of(work, struct rfkill, sync_work);
mutex_lock(&rfkill_global_mutex);
cur = rfkill_global_states[rfkill->type].cur;
rfkill_set_block(rfkill, cur);
mutex_unlock(&rfkill_global_mutex);
}
int __must_check rfkill_register(struct rfkill *rfkill)
{
static unsigned long rfkill_no;
struct device *dev;
int error;
if (!rfkill)
return -EINVAL;
dev = &rfkill->dev;
mutex_lock(&rfkill_global_mutex);
if (rfkill->registered) {
error = -EALREADY;
goto unlock;
}
rfkill->idx = rfkill_no;
dev_set_name(dev, "rfkill%lu", rfkill_no);
rfkill_no++;
list_add_tail(&rfkill->node, &rfkill_list);
error = device_add(dev);
if (error)
goto remove;
error = rfkill_led_trigger_register(rfkill);
if (error)
goto devdel;
rfkill->registered = true;
INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
if (rfkill->ops->poll)
queue_delayed_work(system_power_efficient_wq,
&rfkill->poll_work,
round_jiffies_relative(POLL_INTERVAL));
if (!rfkill->persistent || rfkill_epo_lock_active) {
schedule_work(&rfkill->sync_work);
} else {
#ifdef CONFIG_RFKILL_INPUT
bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
if (!atomic_read(&rfkill_input_disabled))
__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
}
rfkill_global_led_trigger_event();
rfkill_send_events(rfkill, RFKILL_OP_ADD);
mutex_unlock(&rfkill_global_mutex);
return 0;
devdel:
device_del(&rfkill->dev);
remove:
list_del_init(&rfkill->node);
unlock:
mutex_unlock(&rfkill_global_mutex);
return error;
}
EXPORT_SYMBOL(rfkill_register);
void rfkill_unregister(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
if (rfkill->ops->poll)
cancel_delayed_work_sync(&rfkill->poll_work);
cancel_work_sync(&rfkill->uevent_work);
cancel_work_sync(&rfkill->sync_work);
rfkill->registered = false;
device_del(&rfkill->dev);
mutex_lock(&rfkill_global_mutex);
rfkill_send_events(rfkill, RFKILL_OP_DEL);
list_del_init(&rfkill->node);
rfkill_global_led_trigger_event();
mutex_unlock(&rfkill_global_mutex);
rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);
void rfkill_destroy(struct rfkill *rfkill)
{
if (rfkill)
put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);
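/*
 * Illustrative sketch, not part of rfkill core.c: a minimal, hypothetical
 * provider of an rfkill switch using only the rfkill_alloc()/rfkill_register()/
 * rfkill_unregister()/rfkill_destroy() API defined above. The "demo_*" names
 * are made up; a real driver would hang this off its probe/remove paths and
 * program hardware in the set_block callback.
 */
static struct rfkill *demo_rfkill;

static int demo_set_block(void *data, bool blocked)
{
	/* A real driver would (un)power its radio here. */
	return 0;
}

static const struct rfkill_ops demo_rfkill_ops = {
	.set_block = demo_set_block,
};

static int demo_rfkill_probe(struct device *parent)
{
	int err;

	demo_rfkill = rfkill_alloc("demo-wlan", parent, RFKILL_TYPE_WLAN,
				   &demo_rfkill_ops, NULL);
	if (!demo_rfkill)
		return -ENOMEM;

	err = rfkill_register(demo_rfkill);
	if (err) {
		rfkill_destroy(demo_rfkill);
		return err;
	}
	return 0;
}

static void demo_rfkill_remove(void)
{
	rfkill_unregister(demo_rfkill);
	rfkill_destroy(demo_rfkill);
}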
static int rfkill_fop_open(struct inode *inode, struct file *file)
{
struct rfkill_data *data;
struct rfkill *rfkill;
struct rfkill_int_event *ev, *tmp;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->max_size = RFKILL_EVENT_SIZE_V1;
INIT_LIST_HEAD(&data->events);
mutex_init(&data->mtx);
init_waitqueue_head(&data->read_wait);
mutex_lock(&rfkill_global_mutex);
mutex_lock(&data->mtx);
/*
* start getting events from elsewhere but hold mtx to get
* startup events added first
*/
list_for_each_entry(rfkill, &rfkill_list, node) {
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
goto free;
rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
list_add_tail(&ev->list, &data->events);
}
list_add(&data->list, &rfkill_fds);
mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
file->private_data = data;
return stream_open(inode, file);
free:
mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
mutex_destroy(&data->mtx);
list_for_each_entry_safe(ev, tmp, &data->events, list)
kfree(ev);
kfree(data);
return -ENOMEM;
}
static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
{
struct rfkill_data *data = file->private_data;
__poll_t res = EPOLLOUT | EPOLLWRNORM;
poll_wait(file, &data->read_wait, wait);
mutex_lock(&data->mtx);
if (!list_empty(&data->events))
res = EPOLLIN | EPOLLRDNORM;
mutex_unlock(&data->mtx);
return res;
}
static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct rfkill_data *data = file->private_data;
struct rfkill_int_event *ev;
unsigned long sz;
int ret;
mutex_lock(&data->mtx);
while (list_empty(&data->events)) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
goto out;
}
mutex_unlock(&data->mtx);
/* since we re-check and it just compares pointers,
* using !list_empty() without locking isn't a problem
*/
ret = wait_event_interruptible(data->read_wait,
!list_empty(&data->events));
mutex_lock(&data->mtx);
if (ret)
goto out;
}
ev = list_first_entry(&data->events, struct rfkill_int_event,
list);
sz = min_t(unsigned long, sizeof(ev->ev), count);
sz = min_t(unsigned long, sz, data->max_size);
ret = sz;
if (copy_to_user(buf, &ev->ev, sz))
ret = -EFAULT;
list_del(&ev->list);
kfree(ev);
out:
mutex_unlock(&data->mtx);
return ret;
}
static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct rfkill_data *data = file->private_data;
struct rfkill *rfkill;
struct rfkill_event_ext ev;
int ret;
/* we don't need the 'hard' variable but accept it */
if (count < RFKILL_EVENT_SIZE_V1 - 1)
return -EINVAL;
/*
* Copy as much data as we can accept into our 'ev' buffer,
* but tell userspace how much we've copied so it can determine
* our API version even in a write() call, if it cares.
*/
count = min(count, sizeof(ev));
count = min_t(size_t, count, data->max_size);
if (copy_from_user(&ev, buf, count))
return -EFAULT;
if (ev.type >= NUM_RFKILL_TYPES)
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
switch (ev.op) {
case RFKILL_OP_CHANGE_ALL:
rfkill_update_global_state(ev.type, ev.soft);
list_for_each_entry(rfkill, &rfkill_list, node)
if (rfkill->type == ev.type ||
ev.type == RFKILL_TYPE_ALL)
rfkill_set_block(rfkill, ev.soft);
ret = 0;
break;
case RFKILL_OP_CHANGE:
list_for_each_entry(rfkill, &rfkill_list, node)
if (rfkill->idx == ev.idx &&
(rfkill->type == ev.type ||
ev.type == RFKILL_TYPE_ALL))
rfkill_set_block(rfkill, ev.soft);
ret = 0;
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&rfkill_global_mutex);
return ret ?: count;
}
static int rfkill_fop_release(struct inode *inode, struct file *file)
{
struct rfkill_data *data = file->private_data;
struct rfkill_int_event *ev, *tmp;
mutex_lock(&rfkill_global_mutex);
list_del(&data->list);
mutex_unlock(&rfkill_global_mutex);
mutex_destroy(&data->mtx);
list_for_each_entry_safe(ev, tmp, &data->events, list)
kfree(ev);
#ifdef CONFIG_RFKILL_INPUT
if (data->input_handler)
if (atomic_dec_return(&rfkill_input_disabled) == 0)
printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif
kfree(data);
return 0;
}
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct rfkill_data *data = file->private_data;
int ret = -ENOSYS;
u32 size;
if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
return -ENOSYS;
mutex_lock(&data->mtx);
switch (_IOC_NR(cmd)) {
#ifdef CONFIG_RFKILL_INPUT
case RFKILL_IOC_NOINPUT:
if (!data->input_handler) {
if (atomic_inc_return(&rfkill_input_disabled) == 1)
printk(KERN_DEBUG "rfkill: input handler disabled\n");
data->input_handler = true;
}
ret = 0;
break;
#endif
case RFKILL_IOC_MAX_SIZE:
if (get_user(size, (__u32 __user *)arg)) {
ret = -EFAULT;
break;
}
if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) {
ret = -EINVAL;
break;
}
data->max_size = size;
ret = 0;
break;
default:
break;
}
mutex_unlock(&data->mtx);
return ret;
}
static const struct file_operations rfkill_fops = {
.owner = THIS_MODULE,
.open = rfkill_fop_open,
.read = rfkill_fop_read,
.write = rfkill_fop_write,
.poll = rfkill_fop_poll,
.release = rfkill_fop_release,
.unlocked_ioctl = rfkill_fop_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
#define RFKILL_NAME "rfkill"
static struct miscdevice rfkill_miscdev = {
.fops = &rfkill_fops,
.name = RFKILL_NAME,
.minor = RFKILL_MINOR,
};
static int __init rfkill_init(void)
{
int error;
rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);
error = class_register(&rfkill_class);
if (error)
goto error_class;
error = misc_register(&rfkill_miscdev);
if (error)
goto error_misc;
error = rfkill_global_led_trigger_register();
if (error)
goto error_led_trigger;
#ifdef CONFIG_RFKILL_INPUT
error = rfkill_handler_init();
if (error)
goto error_input;
#endif
return 0;
#ifdef CONFIG_RFKILL_INPUT
error_input:
rfkill_global_led_trigger_unregister();
#endif
error_led_trigger:
misc_deregister(&rfkill_miscdev);
error_misc:
class_unregister(&rfkill_class);
error_class:
return error;
}
subsys_initcall(rfkill_init);
static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
rfkill_handler_exit();
#endif
rfkill_global_led_trigger_unregister();
misc_deregister(&rfkill_miscdev);
class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);
MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
MODULE_ALIAS("devname:" RFKILL_NAME);
| linux-master | net/rfkill/core.c |
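A minimal userspace sketch (not from the kernel tree) of the /dev/rfkill ABI implemented by the fops above: it soft-blocks all radios with a RFKILL_OP_CHANGE_ALL write, then drains the events queued for this file descriptor (the RFKILL_OP_ADD snapshot from rfkill_fop_open() plus the change notifications the write itself triggers). All names come from <linux/rfkill.h>.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;
	int fd = open("/dev/rfkill", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;
	/* Soft-block every switch of every type; writing the 8-byte v1
	 * event is enough, rfkill_fop_write() accepts >= 7 bytes. */
	memset(&ev, 0, sizeof(ev));
	ev.op = RFKILL_OP_CHANGE_ALL;
	ev.type = RFKILL_TYPE_ALL;
	ev.soft = 1;
	if (write(fd, &ev, RFKILL_EVENT_SIZE_V1) < 0)
		perror("write");
	/* Each read returns at most data->max_size bytes (8 unless raised
	 * with RFKILL_IOC_MAX_SIZE), so the v1 struct always suffices. */
	while (read(fd, &ev, sizeof(ev)) == RFKILL_EVENT_SIZE_V1)
		printf("idx %u type %u op %u soft %u hard %u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
	close(fd);
	return 0;
}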
// SPDX-License-Identifier: GPL-2.0-only
/*
* Input layer to RF Kill interface connector
*
* Copyright (c) 2007 Dmitry Torokhov
* Copyright 2009 Johannes Berg <[email protected]>
*
* If you ever run into a situation in which you have a SW_ type rfkill
* input device, then you can revive code that was removed in the patch
* "rfkill-input: remove unused code".
*/
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include "rfkill.h"
enum rfkill_input_master_mode {
RFKILL_INPUT_MASTER_UNLOCK = 0,
RFKILL_INPUT_MASTER_RESTORE = 1,
RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
NUM_RFKILL_INPUT_MASTER_MODES
};
/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200
static enum rfkill_input_master_mode rfkill_master_switch_mode =
RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
"SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");
static DEFINE_SPINLOCK(rfkill_op_lock);
static bool rfkill_op_pending;
static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
enum rfkill_sched_op {
RFKILL_GLOBAL_OP_EPO = 0,
RFKILL_GLOBAL_OP_RESTORE,
RFKILL_GLOBAL_OP_UNLOCK,
RFKILL_GLOBAL_OP_UNBLOCK,
};
static enum rfkill_sched_op rfkill_master_switch_op;
static enum rfkill_sched_op rfkill_op;
static void __rfkill_handle_global_op(enum rfkill_sched_op op)
{
unsigned int i;
switch (op) {
case RFKILL_GLOBAL_OP_EPO:
rfkill_epo();
break;
case RFKILL_GLOBAL_OP_RESTORE:
rfkill_restore_states();
break;
case RFKILL_GLOBAL_OP_UNLOCK:
rfkill_remove_epo_lock();
break;
case RFKILL_GLOBAL_OP_UNBLOCK:
rfkill_remove_epo_lock();
for (i = 0; i < NUM_RFKILL_TYPES; i++)
rfkill_switch_all(i, false);
break;
default:
/* memory corruption or bug, fail safely */
rfkill_epo();
WARN(1, "Unknown requested operation %d! "
"rfkill Emergency Power Off activated\n",
op);
}
}
static void __rfkill_handle_normal_op(const enum rfkill_type type,
const bool complement)
{
bool blocked;
blocked = rfkill_get_global_sw_state(type);
if (complement)
blocked = !blocked;
rfkill_switch_all(type, blocked);
}
static void rfkill_op_handler(struct work_struct *work)
{
unsigned int i;
bool c;
spin_lock_irq(&rfkill_op_lock);
do {
if (rfkill_op_pending) {
enum rfkill_sched_op op = rfkill_op;
rfkill_op_pending = false;
memset(rfkill_sw_pending, 0,
sizeof(rfkill_sw_pending));
spin_unlock_irq(&rfkill_op_lock);
__rfkill_handle_global_op(op);
spin_lock_irq(&rfkill_op_lock);
/*
* handle global ops first -- during unlocked period
* we might have gotten a new global op.
*/
if (rfkill_op_pending)
continue;
}
if (rfkill_is_epo_lock_active())
continue;
for (i = 0; i < NUM_RFKILL_TYPES; i++) {
if (__test_and_clear_bit(i, rfkill_sw_pending)) {
c = __test_and_clear_bit(i, rfkill_sw_state);
spin_unlock_irq(&rfkill_op_lock);
__rfkill_handle_normal_op(i, c);
spin_lock_irq(&rfkill_op_lock);
}
}
} while (rfkill_op_pending);
spin_unlock_irq(&rfkill_op_lock);
}
static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
static unsigned long rfkill_last_scheduled;
static unsigned long rfkill_ratelimit(const unsigned long last)
{
const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
return time_after(jiffies, last + delay) ? 0 : delay;
}
static void rfkill_schedule_ratelimited(void)
{
if (schedule_delayed_work(&rfkill_op_work,
rfkill_ratelimit(rfkill_last_scheduled)))
rfkill_last_scheduled = jiffies;
}
static void rfkill_schedule_global_op(enum rfkill_sched_op op)
{
unsigned long flags;
spin_lock_irqsave(&rfkill_op_lock, flags);
rfkill_op = op;
rfkill_op_pending = true;
if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
/* bypass the limiter for EPO */
mod_delayed_work(system_wq, &rfkill_op_work, 0);
rfkill_last_scheduled = jiffies;
} else
rfkill_schedule_ratelimited();
spin_unlock_irqrestore(&rfkill_op_lock, flags);
}
static void rfkill_schedule_toggle(enum rfkill_type type)
{
unsigned long flags;
if (rfkill_is_epo_lock_active())
return;
spin_lock_irqsave(&rfkill_op_lock, flags);
if (!rfkill_op_pending) {
__set_bit(type, rfkill_sw_pending);
__change_bit(type, rfkill_sw_state);
rfkill_schedule_ratelimited();
}
spin_unlock_irqrestore(&rfkill_op_lock, flags);
}
static void rfkill_schedule_evsw_rfkillall(int state)
{
if (state)
rfkill_schedule_global_op(rfkill_master_switch_op);
else
rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}
static void rfkill_event(struct input_handle *handle, unsigned int type,
unsigned int code, int data)
{
if (type == EV_KEY && data == 1) {
switch (code) {
case KEY_WLAN:
rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
break;
case KEY_BLUETOOTH:
rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
break;
case KEY_UWB:
rfkill_schedule_toggle(RFKILL_TYPE_UWB);
break;
case KEY_WIMAX:
rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
break;
case KEY_RFKILL:
rfkill_schedule_toggle(RFKILL_TYPE_ALL);
break;
}
} else if (type == EV_SW && code == SW_RFKILL_ALL)
rfkill_schedule_evsw_rfkillall(data);
}
static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
const struct input_device_id *id)
{
struct input_handle *handle;
int error;
handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
handle->dev = dev;
handle->handler = handler;
handle->name = "rfkill";
/* causes rfkill_start() to be called */
error = input_register_handle(handle);
if (error)
goto err_free_handle;
error = input_open_device(handle);
if (error)
goto err_unregister_handle;
return 0;
err_unregister_handle:
input_unregister_handle(handle);
err_free_handle:
kfree(handle);
return error;
}
static void rfkill_start(struct input_handle *handle)
{
	/*
	 * Take event_lock to guard against configuration changes; we
	 * should be able to deal with concurrency with rfkill_event()
	 * just fine (which event_lock will also avoid).
	 */
spin_lock_irq(&handle->dev->event_lock);
if (test_bit(EV_SW, handle->dev->evbit) &&
test_bit(SW_RFKILL_ALL, handle->dev->swbit))
rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
handle->dev->sw));
spin_unlock_irq(&handle->dev->event_lock);
}
static void rfkill_disconnect(struct input_handle *handle)
{
input_close_device(handle);
input_unregister_handle(handle);
kfree(handle);
}
static const struct input_device_id rfkill_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
.evbit = { BIT(EV_SW) },
.swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
},
{ }
};
static struct input_handler rfkill_handler = {
.name = "rfkill",
.event = rfkill_event,
.connect = rfkill_connect,
.start = rfkill_start,
.disconnect = rfkill_disconnect,
.id_table = rfkill_ids,
};
int __init rfkill_handler_init(void)
{
switch (rfkill_master_switch_mode) {
case RFKILL_INPUT_MASTER_UNBLOCKALL:
rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
break;
case RFKILL_INPUT_MASTER_RESTORE:
rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
break;
case RFKILL_INPUT_MASTER_UNLOCK:
rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
break;
default:
return -EINVAL;
}
/* Avoid delay at first schedule */
rfkill_last_scheduled =
jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
return input_register_handler(&rfkill_handler);
}
void __exit rfkill_handler_exit(void)
{
input_unregister_handler(&rfkill_handler);
cancel_delayed_work_sync(&rfkill_op_work);
}
| linux-master | net/rfkill/input.c |
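The handler above only sees what comes through the input core, so one way to exercise it from userspace (assuming CONFIG_RFKILL_INPUT and uinput are available) is to inject a KEY_RFKILL press. A minimal, hedged sketch; the device name and the one-second settle delays are arbitrary choices, not requirements.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/uinput.h>

static void emit(int fd, unsigned short type, unsigned short code, int value)
{
	struct input_event ie = { .type = type, .code = code, .value = value };

	write(fd, &ie, sizeof(ie));
}

int main(void)
{
	struct uinput_setup usetup;
	int fd = open("/dev/uinput", O_WRONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	ioctl(fd, UI_SET_EVBIT, EV_KEY);
	ioctl(fd, UI_SET_KEYBIT, KEY_RFKILL);
	memset(&usetup, 0, sizeof(usetup));
	usetup.id.bustype = BUS_VIRTUAL;
	strcpy(usetup.name, "rfkill-demo-key");
	ioctl(fd, UI_DEV_SETUP, &usetup);
	ioctl(fd, UI_DEV_CREATE);
	sleep(1);	/* give rfkill_connect() time to attach */
	/* Key press + release: rfkill_event() above schedules a toggle of
	 * RFKILL_TYPE_ALL on the key-down edge (data == 1). */
	emit(fd, EV_KEY, KEY_RFKILL, 1);
	emit(fd, EV_SYN, SYN_REPORT, 0);
	emit(fd, EV_KEY, KEY_RFKILL, 0);
	emit(fd, EV_SYN, SYN_REPORT, 0);
	sleep(1);
	ioctl(fd, UI_DEV_DESTROY);
	close(fd);
	return 0;
}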
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011, NVIDIA Corporation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/rfkill.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
struct rfkill_gpio_data {
const char *name;
enum rfkill_type type;
struct gpio_desc *reset_gpio;
struct gpio_desc *shutdown_gpio;
struct rfkill *rfkill_dev;
struct clk *clk;
bool clk_enabled;
};
static int rfkill_gpio_set_power(void *data, bool blocked)
{
struct rfkill_gpio_data *rfkill = data;
if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
clk_enable(rfkill->clk);
gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked);
gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked);
if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled)
clk_disable(rfkill->clk);
rfkill->clk_enabled = !blocked;
return 0;
}
static const struct rfkill_ops rfkill_gpio_ops = {
.set_block = rfkill_gpio_set_power,
};
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
static const struct acpi_gpio_mapping acpi_rfkill_default_gpios[] = {
{ "reset-gpios", &reset_gpios, 1 },
{ "shutdown-gpios", &shutdown_gpios, 1 },
{ },
};
static int rfkill_gpio_acpi_probe(struct device *dev,
struct rfkill_gpio_data *rfkill)
{
const struct acpi_device_id *id;
id = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!id)
return -ENODEV;
rfkill->type = (unsigned)id->driver_data;
return devm_acpi_dev_add_driver_gpios(dev, acpi_rfkill_default_gpios);
}
static int rfkill_gpio_probe(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill;
struct gpio_desc *gpio;
const char *name_property;
const char *type_property;
const char *type_name;
int ret;
rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
if (!rfkill)
return -ENOMEM;
if (dev_of_node(&pdev->dev)) {
name_property = "label";
type_property = "radio-type";
} else {
name_property = "name";
type_property = "type";
}
device_property_read_string(&pdev->dev, name_property, &rfkill->name);
device_property_read_string(&pdev->dev, type_property, &type_name);
if (!rfkill->name)
rfkill->name = dev_name(&pdev->dev);
rfkill->type = rfkill_find_type(type_name);
if (ACPI_HANDLE(&pdev->dev)) {
ret = rfkill_gpio_acpi_probe(&pdev->dev, rfkill);
if (ret)
return ret;
}
rfkill->clk = devm_clk_get(&pdev->dev, NULL);
gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
rfkill->reset_gpio = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
rfkill->shutdown_gpio = gpio;
	/* Make sure at least one GPIO is defined for this instance */
if (!rfkill->reset_gpio && !rfkill->shutdown_gpio) {
dev_err(&pdev->dev, "invalid platform data\n");
return -EINVAL;
}
rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
rfkill->type, &rfkill_gpio_ops,
rfkill);
if (!rfkill->rfkill_dev)
return -ENOMEM;
ret = rfkill_register(rfkill->rfkill_dev);
if (ret < 0)
goto err_destroy;
platform_set_drvdata(pdev, rfkill);
dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
return 0;
err_destroy:
rfkill_destroy(rfkill->rfkill_dev);
return ret;
}
static int rfkill_gpio_remove(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
rfkill_unregister(rfkill->rfkill_dev);
rfkill_destroy(rfkill->rfkill_dev);
return 0;
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id rfkill_acpi_match[] = {
{ "BCM4752", RFKILL_TYPE_GPS },
{ "LNV4752", RFKILL_TYPE_GPS },
{ },
};
MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match);
#endif
static const struct of_device_id rfkill_of_match[] __maybe_unused = {
{ .compatible = "rfkill-gpio", },
{ },
};
MODULE_DEVICE_TABLE(of, rfkill_of_match);
static struct platform_driver rfkill_gpio_driver = {
.probe = rfkill_gpio_probe,
.remove = rfkill_gpio_remove,
.driver = {
.name = "rfkill_gpio",
.acpi_match_table = ACPI_PTR(rfkill_acpi_match),
.of_match_table = of_match_ptr(rfkill_of_match),
},
};
module_platform_driver(rfkill_gpio_driver);
MODULE_DESCRIPTION("gpio rfkill");
MODULE_AUTHOR("NVIDIA");
MODULE_LICENSE("GPL");
| linux-master | net/rfkill/rfkill-gpio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NET3: Garbage Collector For AF_UNIX sockets
*
* Garbage Collector:
* Copyright (C) Barak A. Pearlmutter.
*
* Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
* If it doesn't work blame me, it worked when Barak sent it.
*
* Assumptions:
*
* - object w/ a bit
* - free list
*
* Current optimizations:
*
* - explicit stack instead of recursion
* - tail recurse on first born instead of immediate push/pop
* - we gather the stuff that should not be killed into tree
* and stack is just a path from root to the current pointer.
*
* Future optimizations:
*
* - don't just push entire root set; process in place
*
* Fixes:
* Alan Cox 07 Sept 1997 Vmalloc internal stack as needed.
* Cope with changing max_files.
* Al Viro 11 Oct 1998
* Graph may have cycles. That is, we can send the descriptor
* of foo to bar and vice versa. Current code chokes on that.
* Fix: move SCM_RIGHTS ones into the separate list and then
* skb_free() them all instead of doing explicit fput's.
* Another problem: since fput() may block somebody may
* create a new unix_socket when we are in the middle of sweep
* phase. Fix: revert the logic wrt MARKED. Mark everything
* upon the beginning and unmark non-junk ones.
*
* [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
* sent to connect()'ed but still not accept()'ed sockets.
* Fixed. Old code had slightly different problem here:
* extra fput() in situation when we passed the descriptor via
* such socket and closed it (descriptor). That would happen on
* each unix_gc() until the accept(). Since the struct file in
* question would go to the free list and might be reused...
 *	That might be the reason for random oopses on filp_close()
* in unrelated processes.
*
* AV 28 Feb 1999
* Kill the explicit allocation of stack. Now we keep the tree
* with root in dummy + pointer (gc_current) to one of the nodes.
* Stack is represented as path from gc_current to dummy. Unmark
* now means "add to tree". Push == "make it a son of gc_current".
* Pop == "move gc_current to parent". We keep only pointers to
* parents (->gc_tree).
* AV 1 Mar 1999
* Damn. Added missing check for ->dead in listen queues scanning.
*
* Miklos Szeredi 25 Jun 2007
* Reimplement with a cycle collecting algorithm. This should
* solve several problems with the previous code, like being racy
* wrt receive and holding up unrelated socket operations.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
#include "scm.h"
/* Internal data structures and random procedures: */
static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
struct sk_buff *skb;
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
/* Do we have file descriptors ? */
if (UNIXCB(skb).fp) {
bool hit = false;
/* Process the descriptors of this socket */
int nfd = UNIXCB(skb).fp->count;
struct file **fp = UNIXCB(skb).fp->fp;
while (nfd--) {
/* Get the socket the fd matches if it indeed does so */
struct sock *sk = unix_get_socket(*fp++);
if (sk) {
struct unix_sock *u = unix_sk(sk);
/* Ignore non-candidates, they could
* have been added to the queues after
* starting the garbage collection
*/
if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
hit = true;
func(u);
}
}
}
if (hit && hitlist != NULL) {
__skb_unlink(skb, &x->sk_receive_queue);
__skb_queue_tail(hitlist, skb);
}
}
}
spin_unlock(&x->sk_receive_queue.lock);
}
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
if (x->sk_state != TCP_LISTEN) {
scan_inflight(x, func, hitlist);
} else {
struct sk_buff *skb;
struct sk_buff *next;
struct unix_sock *u;
LIST_HEAD(embryos);
/* For a listening socket collect the queued embryos
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
/* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &embryos);
}
spin_unlock(&x->sk_receive_queue.lock);
while (!list_empty(&embryos)) {
u = list_entry(embryos.next, struct unix_sock, link);
scan_inflight(&u->sk, func, hitlist);
list_del_init(&u->link);
}
}
}
static void dec_inflight(struct unix_sock *usk)
{
atomic_long_dec(&usk->inflight);
}
static void inc_inflight(struct unix_sock *usk)
{
atomic_long_inc(&usk->inflight);
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
/* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
*/
if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
list_move_tail(&u->link, &gc_candidates);
}
static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000
void wait_for_unix_gc(void)
{
	/* If the number of inflight sockets is insane,
* force a garbage collect right now.
* Paired with the WRITE_ONCE() in unix_inflight(),
* unix_notinflight() and gc_in_progress().
*/
if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
!READ_ONCE(gc_in_progress))
unix_gc();
wait_event(unix_gc_wait, gc_in_progress == false);
}
/* The external entry point: unix_gc() */
void unix_gc(void)
{
struct sk_buff *next_skb, *skb;
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
LIST_HEAD(not_cycle_list);
spin_lock(&unix_gc_lock);
/* Avoid a recursive GC. */
if (gc_in_progress)
goto out;
/* Paired with READ_ONCE() in wait_for_unix_gc(). */
WRITE_ONCE(gc_in_progress, true);
/* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
* reference. Since there are no possible receivers, all
* buffers currently on the candidates' queues stay there
* during the garbage collection.
*
* We also know that no new candidate can be added onto the
* receive queues. Other, non candidate sockets _can_ be
* added to queue, so we must make sure only to touch
* candidates.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
inflight_refs = atomic_long_read(&u->inflight);
BUG_ON(inflight_refs < 1);
BUG_ON(total_refs < inflight_refs);
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
}
}
/* Now remove all internal in-flight reference to children of
* the candidates.
*/
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, dec_inflight, NULL);
/* Restore the references for children of all candidates,
* which have remaining references. Do this recursively, so
* only those remain, which form cyclic references.
*
* Use a "cursor" link, to make the list traversal safe, even
* though elements might be moved about.
*/
list_add(&cursor, &gc_candidates);
while (cursor.next != &gc_candidates) {
u = list_entry(cursor.next, struct unix_sock, link);
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) {
list_move_tail(&u->link, ¬_cycle_list);
__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, inc_inflight, &hitlist);
/* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
while (!list_empty(¬_cycle_list)) {
u = list_entry(not_cycle_list.next, struct unix_sock, link);
__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
list_move_tail(&u->link, &gc_inflight_list);
}
spin_unlock(&unix_gc_lock);
	/* We need io_uring to clean its registered files; ignore all io_uring-
	 * originated skbs. It's fine as io_uring doesn't keep references to
	 * other io_uring instances, so killing all other files in the cycle
	 * will put all io_uring references, forcing it to go through the
	 * normal release path and eventually putting the registered files.
	 */
skb_queue_walk_safe(&hitlist, skb, next_skb) {
if (skb->destructor == io_uring_destruct_scm) {
__skb_unlink(skb, &hitlist);
skb_queue_tail(&skb->sk->sk_receive_queue, skb);
}
}
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
/* There could be io_uring registered files, just push them back to
* the inflight list
*/
list_for_each_entry_safe(u, next, &gc_candidates, link)
list_move_tail(&u->link, &gc_inflight_list);
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
/* Paired with READ_ONCE() in wait_for_unix_gc(). */
WRITE_ONCE(gc_in_progress, false);
wake_up(&unix_gc_wait);
out:
spin_unlock(&unix_gc_lock);
}
| linux-master | net/unix/garbage.c |
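The reference cycles this collector breaks can be created entirely from userspace. A minimal, hedged sketch (not from the kernel tree): one end of a socketpair is passed over the pair via SCM_RIGHTS and both descriptors are closed, leaving the socket reachable only from the skb sitting in its own receive queue -- exactly the total_refs == inflight_refs case selected as a GC candidate above.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	int sv[2];
	char byte = 'x';
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv))
		return 1;
	memset(u.buf, 0, sizeof(u.buf));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));	/* sv[0] goes in-flight */
	/* The skb lands in sv[0]'s own receive queue (sv[0] is sv[1]'s peer),
	 * so after both close() calls the only reference left to sv[0] is the
	 * one held by that queued skb: an unreachable cycle for unix_gc(). */
	if (sendmsg(sv[1], &msg, 0) < 0)
		return 1;
	close(sv[0]);
	close(sv[1]);
	return 0;
}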
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/uidgid.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
#include <net/sock.h>
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
/* might or might not have a hash table lock */
struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
if (!addr)
return 0;
return nla_put(nlskb, UNIX_DIAG_NAME,
addr->len - offsetof(struct sockaddr_un, sun_path),
addr->name->sun_path);
}
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
struct dentry *dentry = unix_sk(sk)->path.dentry;
if (dentry) {
struct unix_diag_vfs uv = {
.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
.udiag_vfs_dev = dentry->d_sb->s_dev,
};
return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
}
return 0;
}
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
struct sock *peer;
int ino;
peer = unix_peer_get(sk);
if (peer) {
unix_state_lock(peer);
ino = sock_i_ino(peer);
unix_state_unlock(peer);
sock_put(peer);
return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
}
return 0;
}
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
struct sk_buff *skb;
struct nlattr *attr;
u32 *buf;
int i;
if (sk->sk_state == TCP_LISTEN) {
spin_lock(&sk->sk_receive_queue.lock);
attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
sk->sk_receive_queue.qlen * sizeof(u32));
if (!attr)
goto errout;
buf = nla_data(attr);
i = 0;
skb_queue_walk(&sk->sk_receive_queue, skb) {
struct sock *req, *peer;
req = skb->sk;
			/*
			 * The state lock nests outside the receive queue
			 * lock of the same sk, so with the other socket's
			 * queue locked it is OK to take this state lock.
			 */
unix_state_lock_nested(req);
peer = unix_sk(req)->peer;
buf[i++] = (peer ? sock_i_ino(peer) : 0);
unix_state_unlock(req);
}
spin_unlock(&sk->sk_receive_queue.lock);
}
return 0;
errout:
spin_unlock(&sk->sk_receive_queue.lock);
return -EMSGSIZE;
}
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
struct unix_diag_rqlen rql;
if (sk->sk_state == TCP_LISTEN) {
rql.udiag_rqueue = sk->sk_receive_queue.qlen;
rql.udiag_wqueue = sk->sk_max_ack_backlog;
} else {
rql.udiag_rqueue = (u32) unix_inq_len(sk);
rql.udiag_wqueue = (u32) unix_outq_len(sk);
}
return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
struct user_namespace *user_ns)
{
uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
struct user_namespace *user_ns,
u32 portid, u32 seq, u32 flags, int sk_ino)
{
struct nlmsghdr *nlh;
struct unix_diag_msg *rep;
nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
flags);
if (!nlh)
return -EMSGSIZE;
rep = nlmsg_data(nlh);
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
rep->udiag_state = sk->sk_state;
rep->pad = 0;
rep->udiag_ino = sk_ino;
sock_diag_save_cookie(sk, rep->udiag_cookie);
if ((req->udiag_show & UDIAG_SHOW_NAME) &&
sk_diag_dump_name(sk, skb))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_VFS) &&
sk_diag_dump_vfs(sk, skb))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_PEER) &&
sk_diag_dump_peer(sk, skb))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
sk_diag_dump_icons(sk, skb))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
sk_diag_show_rqlen(sk, skb))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
goto out_nlmsg_trim;
if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
sk_diag_dump_uid(sk, skb, user_ns))
goto out_nlmsg_trim;
nlmsg_end(skb, nlh);
return 0;
out_nlmsg_trim:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
struct user_namespace *user_ns,
u32 portid, u32 seq, u32 flags)
{
int sk_ino;
unix_state_lock(sk);
sk_ino = sock_i_ino(sk);
unix_state_unlock(sk);
if (!sk_ino)
return 0;
return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
}
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int num, s_num, slot, s_slot;
struct unix_diag_req *req;
req = nlmsg_data(cb->nlh);
s_slot = cb->args[0];
num = s_num = cb->args[1];
for (slot = s_slot; slot < UNIX_HASH_SIZE; s_num = 0, slot++) {
struct sock *sk;
num = 0;
spin_lock(&net->unx.table.locks[slot]);
sk_for_each(sk, &net->unx.table.buckets[slot]) {
if (num < s_num)
goto next;
if (!(req->udiag_states & (1 << sk->sk_state)))
goto next;
if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI) < 0) {
spin_unlock(&net->unx.table.locks[slot]);
goto done;
}
next:
num++;
}
spin_unlock(&net->unx.table.locks[slot]);
}
done:
cb->args[0] = slot;
cb->args[1] = num;
return skb->len;
}
static struct sock *unix_lookup_by_ino(struct net *net, unsigned int ino)
{
struct sock *sk;
int i;
for (i = 0; i < UNIX_HASH_SIZE; i++) {
spin_lock(&net->unx.table.locks[i]);
sk_for_each(sk, &net->unx.table.buckets[i]) {
if (ino == sock_i_ino(sk)) {
sock_hold(sk);
spin_unlock(&net->unx.table.locks[i]);
return sk;
}
}
spin_unlock(&net->unx.table.locks[i]);
}
return NULL;
}
static int unix_diag_get_exact(struct sk_buff *in_skb,
const struct nlmsghdr *nlh,
struct unix_diag_req *req)
{
struct net *net = sock_net(in_skb->sk);
unsigned int extra_len;
struct sk_buff *rep;
struct sock *sk;
int err;
err = -EINVAL;
if (req->udiag_ino == 0)
goto out_nosk;
sk = unix_lookup_by_ino(net, req->udiag_ino);
err = -ENOENT;
if (sk == NULL)
goto out_nosk;
err = sock_diag_check_cookie(sk, req->udiag_cookie);
if (err)
goto out;
extra_len = 256;
again:
err = -ENOMEM;
rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
if (!rep)
goto out;
err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0, req->udiag_ino);
if (err < 0) {
nlmsg_free(rep);
extra_len += 256;
if (extra_len >= PAGE_SIZE)
goto out;
goto again;
}
err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
out:
if (sk)
sock_put(sk);
out_nosk:
return err;
}
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct unix_diag_req);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
if (h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = unix_diag_dump,
};
return netlink_dump_start(sock_net(skb->sk)->diag_nlsk, skb, h, &c);
} else
return unix_diag_get_exact(skb, h, nlmsg_data(h));
}
static const struct sock_diag_handler unix_diag_handler = {
.family = AF_UNIX,
.dump = unix_diag_handler_dump,
};
static int __init unix_diag_init(void)
{
return sock_diag_register(&unix_diag_handler);
}
static void __exit unix_diag_exit(void)
{
sock_diag_unregister(&unix_diag_handler);
}
module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
| linux-master | net/unix/diag.c |
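The dump side of this handler can be driven from userspace over NETLINK_SOCK_DIAG. A minimal, hedged sketch (not from the kernel tree) that asks for all AF_UNIX sockets and prints the fixed part of each unix_diag_msg; error handling is reduced to the bare minimum.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>

int main(void)
{
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_UNIX,
			.udiag_states = -1,	/* all states */
			.udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
				      UDIAG_SHOW_RQLEN,
		},
	};
	char buf[8192];
	int fd, done = 0;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return 1;
	if (sendto(fd, &msg, sizeof(msg), 0,
		   (struct sockaddr *)&nladdr, sizeof(nladdr)) < 0)
		return 1;
	while (!done) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;
		int len = recv(fd, buf, sizeof(buf), 0);

		if (len <= 0)
			break;
		for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
			struct unix_diag_msg *m;

			if (h->nlmsg_type == NLMSG_DONE ||
			    h->nlmsg_type == NLMSG_ERROR) {
				done = 1;
				break;
			}
			m = NLMSG_DATA(h);
			printf("ino %u state %u type %u\n",
			       m->udiag_ino, m->udiag_state, m->udiag_type);
		}
	}
	close(fd);
	return 0;
}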
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/io_uring.h>
#include "scm.h"
unsigned int unix_tot_inflight;
EXPORT_SYMBOL(unix_tot_inflight);
LIST_HEAD(gc_inflight_list);
EXPORT_SYMBOL(gc_inflight_list);
DEFINE_SPINLOCK(unix_gc_lock);
EXPORT_SYMBOL(unix_gc_lock);
struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = file_inode(filp);
/* Socket ? */
if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
struct socket *sock = SOCKET_I(inode);
const struct proto_ops *ops = READ_ONCE(sock->ops);
struct sock *s = sock->sk;
/* PF_UNIX ? */
if (s && ops && ops->family == PF_UNIX)
u_sock = s;
} else {
/* Could be an io_uring instance */
u_sock = io_uring_get_socket(filp);
}
return u_sock;
}
EXPORT_SYMBOL(unix_get_socket);
/* Keep an in-flight count for the file descriptor if it refers to
 * an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *fp)
{
struct sock *s = unix_get_socket(fp);
spin_lock(&unix_gc_lock);
if (s) {
struct unix_sock *u = unix_sk(s);
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
} else {
BUG_ON(list_empty(&u->link));
}
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
}
WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
spin_unlock(&unix_gc_lock);
}
void unix_notinflight(struct user_struct *user, struct file *fp)
{
struct sock *s = unix_get_socket(fp);
spin_lock(&unix_gc_lock);
if (s) {
struct unix_sock *u = unix_sk(s);
BUG_ON(!atomic_long_read(&u->inflight));
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
}
WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
spin_unlock(&unix_gc_lock);
}
/*
* The "user->unix_inflight" variable is protected by the garbage
* collection lock, and we just read it locklessly here. If you go
* over the limit, there might be a tiny race in actually noticing
* it across threads. Tough.
*/
static inline bool too_many_unix_fds(struct task_struct *p)
{
struct user_struct *user = current_user();
if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
return false;
}
int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
if (too_many_unix_fds(current))
return -ETOOMANYREFS;
/*
* Need to duplicate file references for the sake of garbage
* collection. Otherwise a socket in the fps might become a
* candidate for GC while the skb is not yet queued.
*/
UNIXCB(skb).fp = scm_fp_dup(scm->fp);
if (!UNIXCB(skb).fp)
return -ENOMEM;
for (i = scm->fp->count - 1; i >= 0; i--)
unix_inflight(scm->fp->user, scm->fp->fp[i]);
return 0;
}
EXPORT_SYMBOL(unix_attach_fds);
void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
scm->fp = UNIXCB(skb).fp;
UNIXCB(skb).fp = NULL;
for (i = scm->fp->count-1; i >= 0; i--)
unix_notinflight(scm->fp->user, scm->fp->fp[i]);
}
EXPORT_SYMBOL(unix_detach_fds);
void unix_destruct_scm(struct sk_buff *skb)
{
struct scm_cookie scm;
memset(&scm, 0, sizeof(scm));
scm.pid = UNIXCB(skb).pid;
if (UNIXCB(skb).fp)
unix_detach_fds(&scm, skb);
/* Alas, it calls VFS */
/* So fscking what? fput() had been SMP-safe since the last Summer */
scm_destroy(&scm);
sock_wfree(skb);
}
EXPORT_SYMBOL(unix_destruct_scm);
void io_uring_destruct_scm(struct sk_buff *skb)
{
unix_destruct_scm(skb);
}
EXPORT_SYMBOL(io_uring_destruct_scm);
| linux-master | net/unix/scm.c |
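When the receiver picks such a message up, unix_detach_fds() above hands the file references back to the SCM layer, which installs them as new descriptors in the receiving process. A minimal, hedged userspace sketch of that receive side; `sock` is assumed to be an already-connected AF_UNIX socket.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Receive one data byte plus at most one descriptor passed via SCM_RIGHTS;
 * returns the received fd, or -1 if none arrived. */
static int recv_one_fd(int sock)
{
	char byte;
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg;
	int fd = -1;

	if (recvmsg(sock, &msg, 0) <= 0)
		return -1;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_RIGHTS)
			memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
	return fd;
}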
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Cong Wang <[email protected]> */
#include <linux/skmsg.h>
#include <linux/bpf.h>
#include <net/sock.h>
#include <net/af_unix.h>
#define unix_sk_has_data(__sk, __psock) \
({ !skb_queue_empty(&__sk->sk_receive_queue) || \
!skb_queue_empty(&__psock->ingress_skb) || \
!list_empty(&__psock->ingress_msg); \
})
static int unix_msg_wait_data(struct sock *sk, struct sk_psock *psock,
long timeo)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct unix_sock *u = unix_sk(sk);
int ret = 0;
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 1;
if (!timeo)
return ret;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
if (!unix_sk_has_data(sk, psock)) {
mutex_unlock(&u->iolock);
wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
mutex_lock(&u->iolock);
ret = unix_sk_has_data(sk, psock);
}
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
return ret;
}
static int __unix_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int flags)
{
if (sk->sk_type == SOCK_DGRAM)
return __unix_dgram_recvmsg(sk, msg, len, flags);
else
return __unix_stream_recvmsg(sk, msg, len, flags);
}
static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int flags, int *addr_len)
{
struct unix_sock *u = unix_sk(sk);
struct sk_psock *psock;
int copied;
if (!len)
return 0;
psock = sk_psock_get(sk);
if (unlikely(!psock))
return __unix_recvmsg(sk, msg, len, flags);
mutex_lock(&u->iolock);
if (!skb_queue_empty(&sk->sk_receive_queue) &&
sk_psock_queue_empty(psock)) {
mutex_unlock(&u->iolock);
sk_psock_put(sk, psock);
return __unix_recvmsg(sk, msg, len, flags);
}
msg_bytes_ready:
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
if (!copied) {
long timeo;
int data;
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
data = unix_msg_wait_data(sk, psock, timeo);
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
mutex_unlock(&u->iolock);
sk_psock_put(sk, psock);
return __unix_recvmsg(sk, msg, len, flags);
}
copied = -EAGAIN;
}
mutex_unlock(&u->iolock);
sk_psock_put(sk, psock);
return copied;
}
static struct proto *unix_dgram_prot_saved __read_mostly;
static DEFINE_SPINLOCK(unix_dgram_prot_lock);
static struct proto unix_dgram_bpf_prot;
static struct proto *unix_stream_prot_saved __read_mostly;
static DEFINE_SPINLOCK(unix_stream_prot_lock);
static struct proto unix_stream_bpf_prot;
static void unix_dgram_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
{
*prot = *base;
prot->close = sock_map_close;
prot->recvmsg = unix_bpf_recvmsg;
prot->sock_is_readable = sk_msg_is_readable;
}
static void unix_stream_bpf_rebuild_protos(struct proto *prot,
const struct proto *base)
{
*prot = *base;
prot->close = sock_map_close;
prot->recvmsg = unix_bpf_recvmsg;
prot->sock_is_readable = sk_msg_is_readable;
prot->unhash = sock_map_unhash;
}
static void unix_dgram_bpf_check_needs_rebuild(struct proto *ops)
{
if (unlikely(ops != smp_load_acquire(&unix_dgram_prot_saved))) {
spin_lock_bh(&unix_dgram_prot_lock);
if (likely(ops != unix_dgram_prot_saved)) {
unix_dgram_bpf_rebuild_protos(&unix_dgram_bpf_prot, ops);
smp_store_release(&unix_dgram_prot_saved, ops);
}
spin_unlock_bh(&unix_dgram_prot_lock);
}
}
static void unix_stream_bpf_check_needs_rebuild(struct proto *ops)
{
if (unlikely(ops != smp_load_acquire(&unix_stream_prot_saved))) {
spin_lock_bh(&unix_stream_prot_lock);
if (likely(ops != unix_stream_prot_saved)) {
unix_stream_bpf_rebuild_protos(&unix_stream_bpf_prot, ops);
smp_store_release(&unix_stream_prot_saved, ops);
}
spin_unlock_bh(&unix_stream_prot_lock);
}
}
int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
if (sk->sk_type != SOCK_DGRAM)
return -EOPNOTSUPP;
if (restore) {
sk->sk_write_space = psock->saved_write_space;
sock_replace_proto(sk, psock->sk_proto);
return 0;
}
unix_dgram_bpf_check_needs_rebuild(psock->sk_proto);
sock_replace_proto(sk, &unix_dgram_bpf_prot);
return 0;
}
int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
if (restore) {
sk->sk_write_space = psock->saved_write_space;
sock_replace_proto(sk, psock->sk_proto);
return 0;
}
unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
sock_replace_proto(sk, &unix_stream_bpf_prot);
return 0;
}
void __init unix_bpf_build_proto(void)
{
unix_dgram_bpf_rebuild_protos(&unix_dgram_bpf_prot, &unix_dgram_proto);
unix_stream_bpf_rebuild_protos(&unix_stream_bpf_prot, &unix_stream_proto);
}
| linux-master | net/unix/unix_bpf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NET4: Implementation of BSD Unix domain sockets.
*
* Authors: Alan Cox, <[email protected]>
*
* Fixes:
* Linus Torvalds : Assorted bug cures.
* Niibe Yutaka : async I/O support.
* Carsten Paeth : PF_UNIX check, address fixes.
* Alan Cox : Limit size of allocated blocks.
* Alan Cox : Fixed the stupid socketpair bug.
* Alan Cox : BSD compatibility fine tuning.
* Alan Cox : Fixed a bug in connect when interrupted.
* Alan Cox : Sorted out a proper draft version of
* file descriptor passing hacked up from
* Mike Shaver's work.
* Marty Leisner : Fixes to fd passing
* Nick Nevin : recvmsg bugfix.
* Alan Cox : Started proper garbage collector
* Heiko EiBfeldt : Missing verify_area check
* Alan Cox : Started POSIXisms
* Andreas Schwab : Replace inode by dentry for proper
* reference counting
* Kirk Petersen : Made this a module
* Christoph Rohland : Elegant non-blocking accept/connect algorithm.
* Lots of bug fixes.
 *	Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by above two patches.
 *	Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid a huge amount
 *					of socks hashed (this is for unix_gc()
 *					performance reasons).
* Security fix that limits the max
* number of socks to 2*max_files and
* the number of skb queueable in the
* dgram receiver.
* Artur Skawina : Hash function optimizations
* Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
* Malcolm Beattie : Set peercred for socketpair
* Michal Ostrowski : Module initialization cleanup.
* Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
* the core infrastructure is doing that
* for all net proto families now (2.5.69+)
*
* Known differences from reference BSD that was tested:
*
* [TO FIX]
* ECONNREFUSED is not returned from one end of a connected() socket to the
* other the moment one end closes.
* fstat() doesn't return st_dev=0, and give the blksize as high water mark
* and a fake inode identifier (nor the BSD first socket fstat twice bug).
* [NOT TO FIX]
* accept() returns a path name even if the connecting socket has closed
* in the meantime (BSD loses the path and gives up).
* accept() returns 0 length path for an unbound connector. BSD returns 16
* and a null first byte in the path (but not for gethost/peername - BSD bug ??)
* socketpair(...SOCK_RAW..) doesn't panic the kernel.
* BSD af_unix apparently has connect forgetting to block properly.
* (need to check this with the POSIX spec in detail)
*
* Differences from 2.0.0-11-... (ANK)
* Bug fixes and improvements.
* - client shutdown killed server socket.
* - removed all useless cli/sti pairs.
*
* Semantic changes/extensions.
* - generic control message passing.
* - SCM_CREDENTIALS control message.
* - "Abstract" (not FS based) socket bindings.
* Abstract names are sequences of bytes (not zero terminated)
* started by 0, so that this name space does not intersect
* with BSD names.
*/
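/*
 * Userspace illustration (not kernel code): binding to an "abstract"
 * address as described above. A minimal, hedged sketch; the name
 * "demo.abstract" is arbitrary. The length passed to bind() covers only
 * the bytes that make up the name, since abstract names are not zero
 * terminated, and the leading NUL in sun_path selects the namespace.
 */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int bind_abstract_example(void)
{
	static const char name[] = "demo.abstract";
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	socklen_t len = offsetof(struct sockaddr_un, sun_path) +
			1 + (sizeof(name) - 1);
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* sun_path[0] stays '\0'; the name starts at sun_path[1]. */
	memcpy(sun.sun_path + 1, name, sizeof(name) - 1);
	if (bind(fd, (struct sockaddr *)&sun, len) < 0)
		return -1;
	return fd;
}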
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/splice.h>
#include <linux/freezer.h>
#include <linux/file.h>
#include <linux/btf_ids.h>
#include "scm.h"
static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
/* SMP locking strategy:
* hash table is protected with spinlock.
* each socket state is protected by separate spinlock.
*/
static unsigned int unix_unbound_hash(struct sock *sk)
{
unsigned long hash = (unsigned long)sk;
hash ^= hash >> 16;
hash ^= hash >> 8;
hash ^= sk->sk_type;
return hash & UNIX_HASH_MOD;
}
static unsigned int unix_bsd_hash(struct inode *i)
{
return i->i_ino & UNIX_HASH_MOD;
}
static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
int addr_len, int type)
{
__wsum csum = csum_partial(sunaddr, addr_len, 0);
unsigned int hash;
hash = (__force unsigned int)csum_fold(csum);
hash ^= hash >> 8;
hash ^= type;
return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
}
static void unix_table_double_lock(struct net *net,
unsigned int hash1, unsigned int hash2)
{
if (hash1 == hash2) {
spin_lock(&net->unx.table.locks[hash1]);
return;
}
if (hash1 > hash2)
swap(hash1, hash2);
spin_lock(&net->unx.table.locks[hash1]);
spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
}
static void unix_table_double_unlock(struct net *net,
unsigned int hash1, unsigned int hash2)
{
if (hash1 == hash2) {
spin_unlock(&net->unx.table.locks[hash1]);
return;
}
spin_unlock(&net->unx.table.locks[hash1]);
spin_unlock(&net->unx.table.locks[hash2]);
}
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
UNIXCB(skb).secid = scm->secid;
}
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->secid = UNIXCB(skb).secid;
}
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
return true;
}
#endif /* CONFIG_SECURITY_NETWORK */
#define unix_peer(sk) (unix_sk(sk)->peer)
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
return unix_peer(osk) == sk;
}
static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
static inline int unix_recvq_full(const struct sock *sk)
{
return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
return skb_queue_len_lockless(&sk->sk_receive_queue) >
READ_ONCE(sk->sk_max_ack_backlog);
}
struct sock *unix_peer_get(struct sock *s)
{
struct sock *peer;
unix_state_lock(s);
peer = unix_peer(s);
if (peer)
sock_hold(peer);
unix_state_unlock(s);
return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);
static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
int addr_len)
{
struct unix_address *addr;
addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
if (!addr)
return NULL;
refcount_set(&addr->refcnt, 1);
addr->len = addr_len;
memcpy(addr->name, sunaddr, addr_len);
return addr;
}
static inline void unix_release_addr(struct unix_address *addr)
{
if (refcount_dec_and_test(&addr->refcnt))
kfree(addr);
}
/*
 * Check unix socket name:
 * - should not be zero length.
 * - if it starts with a non-zero byte, it must be NUL terminated (FS object)
 * - if it starts with a zero byte, it is an abstract name.
 */
static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
addr_len > sizeof(*sunaddr))
return -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
return -EINVAL;
return 0;
}
static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
short offset = offsetof(struct sockaddr_storage, __data);
BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
/* This may look like an off by one error but it is a bit more
* subtle. 108 is the longest valid AF_UNIX path for a binding.
* sun_path[108] doesn't as such exist. However in kernel space
* we are guaranteed that it is a valid memory location in our
* kernel address buffer because syscall functions always pass
* a pointer of struct sockaddr_storage which has a bigger buffer
* than 108. Also, we must terminate sun_path for strlen() in
* getname_kernel().
*/
addr->__data[addr_len - offset] = 0;
/* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
* cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
* know the actual buffer.
*/
return strlen(addr->__data) + offset + 1;
}
static void __unix_remove_socket(struct sock *sk)
{
sk_del_node_init(sk);
}
static void __unix_insert_socket(struct net *net, struct sock *sk)
{
DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
}
static void __unix_set_addr_hash(struct net *net, struct sock *sk,
struct unix_address *addr, unsigned int hash)
{
__unix_remove_socket(sk);
smp_store_release(&unix_sk(sk)->addr, addr);
sk->sk_hash = hash;
__unix_insert_socket(net, sk);
}
static void unix_remove_socket(struct net *net, struct sock *sk)
{
spin_lock(&net->unx.table.locks[sk->sk_hash]);
__unix_remove_socket(sk);
spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}
static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
{
spin_lock(&net->unx.table.locks[sk->sk_hash]);
__unix_insert_socket(net, sk);
spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}
static void unix_insert_bsd_socket(struct sock *sk)
{
spin_lock(&bsd_socket_locks[sk->sk_hash]);
sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
spin_unlock(&bsd_socket_locks[sk->sk_hash]);
}
static void unix_remove_bsd_socket(struct sock *sk)
{
if (!hlist_unhashed(&sk->sk_bind_node)) {
spin_lock(&bsd_socket_locks[sk->sk_hash]);
__sk_del_bind_node(sk);
spin_unlock(&bsd_socket_locks[sk->sk_hash]);
sk_node_init(&sk->sk_bind_node);
}
}
static struct sock *__unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
int len, unsigned int hash)
{
struct sock *s;
sk_for_each(s, &net->unx.table.buckets[hash]) {
struct unix_sock *u = unix_sk(s);
if (u->addr->len == len &&
!memcmp(u->addr->name, sunname, len))
return s;
}
return NULL;
}
static inline struct sock *unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
int len, unsigned int hash)
{
struct sock *s;
spin_lock(&net->unx.table.locks[hash]);
s = __unix_find_socket_byname(net, sunname, len, hash);
if (s)
sock_hold(s);
spin_unlock(&net->unx.table.locks[hash]);
return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
unsigned int hash = unix_bsd_hash(i);
struct sock *s;
spin_lock(&bsd_socket_locks[hash]);
sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
if (dentry && d_backing_inode(dentry) == i) {
sock_hold(s);
spin_unlock(&bsd_socket_locks[hash]);
return s;
}
}
spin_unlock(&bsd_socket_locks[hash]);
return NULL;
}
/* Support code for asymmetrically connected dgram sockets
*
* If a datagram socket is connected to a socket not itself connected
* to the first socket (eg, /dev/log), clients may only enqueue more
* messages if the present receive queue of the server socket is not
* "too large". This means there's a second writeability condition
* poll and sendmsg need to test. The dgram recv code will do a wake
* up on the peer_wait wait queue of a socket upon reception of a
* datagram which needs to be propagated to sleeping would-be writers
* since these might not have sent anything so far. This can't be
* accomplished via poll_wait because the lifetime of the server
* socket might be less than that of its clients if these break their
* association with it or if the server socket is closed while clients
* are still connected to it and there's no way to inform "a polling
* implementation" that it should let go of a certain wait queue
*
* In order to propagate a wake up, a wait_queue_entry_t of the client
* socket is enqueued on the peer_wait queue of the server socket
* whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition, and is broken when the
 * association to the server socket is dissolved or after a wake up
 * was relayed.
*/
static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
void *key)
{
struct unix_sock *u;
wait_queue_head_t *u_sleep;
u = container_of(q, struct unix_sock, peer_wake);
__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
q);
u->peer_wake.private = NULL;
/* relaying can only happen while the wq still exists */
u_sleep = sk_sleep(&u->sk);
if (u_sleep)
wake_up_interruptible_poll(u_sleep, key_to_poll(key));
return 0;
}
static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
struct unix_sock *u, *u_other;
int rc;
u = unix_sk(sk);
u_other = unix_sk(other);
rc = 0;
spin_lock(&u_other->peer_wait.lock);
if (!u->peer_wake.private) {
u->peer_wake.private = other;
__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
rc = 1;
}
spin_unlock(&u_other->peer_wait.lock);
return rc;
}
static void unix_dgram_peer_wake_disconnect(struct sock *sk,
struct sock *other)
{
struct unix_sock *u, *u_other;
u = unix_sk(sk);
u_other = unix_sk(other);
spin_lock(&u_other->peer_wait.lock);
if (u->peer_wake.private == other) {
__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
u->peer_wake.private = NULL;
}
spin_unlock(&u_other->peer_wait.lock);
}
static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
struct sock *other)
{
unix_dgram_peer_wake_disconnect(sk, other);
wake_up_interruptible_poll(sk_sleep(sk),
EPOLLOUT |
EPOLLWRNORM |
EPOLLWRBAND);
}
/* preconditions:
* - unix_peer(sk) == other
* - association is stable
*/
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
int connected;
connected = unix_dgram_peer_wake_connect(sk, other);
/* If other is SOCK_DEAD, we want to make sure we signal
* POLLOUT, such that a subsequent write() can get a
* -ECONNREFUSED. Otherwise, if we haven't queued any skbs
 * to other and it's full, we will hang waiting for POLLOUT.
*/
if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
return 1;
if (connected)
unix_dgram_peer_wake_disconnect(sk, other);
return 0;
}
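/* A socket is writable while its queued write memory is at most a quarter
 * of sk_sndbuf (wmem_alloc << 2 <= sndbuf); listening sockets are never
 * writable.
 */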
static int unix_writable(const struct sock *sk)
{
return sk->sk_state != TCP_LISTEN &&
(refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
static void unix_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
if (unix_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
if (!skb_queue_empty(&sk->sk_receive_queue)) {
skb_queue_purge(&sk->sk_receive_queue);
wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
/* If one link of a bidirectional dgram pipe is disconnected,
 * we signal an error. Messages are lost. Do not do this
 * when the peer was not connected to us.
 */
if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
WRITE_ONCE(other->sk_err, ECONNRESET);
sk_error_report(other);
}
}
other->sk_state = TCP_CLOSE;
}
static void unix_sock_destructor(struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
skb_queue_purge(&sk->sk_receive_queue);
DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_info("Attempt to release alive unix socket: %p\n", sk);
return;
}
if (u->addr)
unix_release_addr(u->addr);
atomic_long_dec(&unix_nr_socks);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
#endif
}
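/* Tear down a unix socket: unhash it, orphan it, mark both directions shut
 * down, notify/disconnect the peer, flush the receive queue (releasing any
 * embryo connections for listeners), and kick the fd garbage collector if
 * descriptors are still in flight.
 */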
static void unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
struct sock *skpair;
struct sk_buff *skb;
struct path path;
int state;
unix_remove_socket(sock_net(sk), sk);
unix_remove_bsd_socket(sk);
/* Clear state */
unix_state_lock(sk);
sock_orphan(sk);
WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
path = u->path;
u->path.dentry = NULL;
u->path.mnt = NULL;
state = sk->sk_state;
sk->sk_state = TCP_CLOSE;
skpair = unix_peer(sk);
unix_peer(sk) = NULL;
unix_state_unlock(sk);
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (u->oob_skb) {
kfree_skb(u->oob_skb);
u->oob_skb = NULL;
}
#endif
wake_up_interruptible_all(&u->peer_wait);
if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
unix_state_lock(skpair);
/* No more writes */
WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
WRITE_ONCE(skpair->sk_err, ECONNRESET);
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
}
unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
}
/* Try to flush out this socket. Throw out buffers at least */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (state == TCP_LISTEN)
unix_release_sock(skb->sk, 1);
/* passed fds are erased in the kfree_skb hook */
UNIXCB(skb).consumed = skb->len;
kfree_skb(skb);
}
if (path.dentry)
path_put(&path);
sock_put(sk);
/* ---- Socket is dead now and most probably destroyed ---- */
/*
* Fixme: BSD difference: In BSD all sockets connected to us get
* ECONNRESET and we die on the spot. In Linux we behave
* like files and pipes do and wait for the last
* dereference.
*
* Can't we simply set sock->err?
*
 * What is the above comment actually talking about? --ANK(980817)
*/
if (READ_ONCE(unix_tot_inflight))
unix_gc(); /* Garbage collect fds */
}
static void init_peercred(struct sock *sk)
{
const struct cred *old_cred;
struct pid *old_pid;
spin_lock(&sk->sk_peer_lock);
old_pid = sk->sk_peer_pid;
old_cred = sk->sk_peer_cred;
sk->sk_peer_pid = get_pid(task_tgid(current));
sk->sk_peer_cred = get_current_cred();
spin_unlock(&sk->sk_peer_lock);
put_pid(old_pid);
put_cred(old_cred);
}
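/* Copy the peer credentials from peersk to sk. Both sk_peer_lock spinlocks
 * are taken in pointer order so that two concurrent copies cannot deadlock.
 */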
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
const struct cred *old_cred;
struct pid *old_pid;
if (sk < peersk) {
spin_lock(&sk->sk_peer_lock);
spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
} else {
spin_lock(&peersk->sk_peer_lock);
spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
}
old_pid = sk->sk_peer_pid;
old_cred = sk->sk_peer_cred;
sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
spin_unlock(&sk->sk_peer_lock);
spin_unlock(&peersk->sk_peer_lock);
put_pid(old_pid);
put_cred(old_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
int err;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
if (!u->addr)
goto out; /* No listens on an unbound socket */
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
goto out_unlock;
if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
/* set credentials so connect can copy them */
init_peercred(sk);
err = 0;
out_unlock:
unix_state_unlock(sk);
out:
return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
struct pipe_inode_info *, size_t size,
unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
int);
static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
if (mutex_lock_interruptible(&u->iolock))
return -EINTR;
WRITE_ONCE(sk->sk_peek_off, val);
mutex_unlock(&u->iolock);
return 0;
}
#ifdef CONFIG_PROC_FS
static int unix_count_nr_fds(struct sock *sk)
{
struct sk_buff *skb;
struct unix_sock *u;
int nr_fds = 0;
spin_lock(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
while (skb) {
u = unix_sk(skb->sk);
nr_fds += atomic_read(&u->scm_stat.nr_fds);
skb = skb_peek_next(skb, &sk->sk_receive_queue);
}
spin_unlock(&sk->sk_receive_queue.lock);
return nr_fds;
}
static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
struct sock *sk = sock->sk;
unsigned char s_state;
struct unix_sock *u;
int nr_fds = 0;
if (sk) {
s_state = READ_ONCE(sk->sk_state);
u = unix_sk(sk);
/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
* sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
 * SOCK_DGRAM is ordinary, so no lock is needed.
*/
if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
nr_fds = atomic_read(&u->scm_stat.nr_fds);
else if (s_state == TCP_LISTEN)
nr_fds = unix_count_nr_fds(sk);
seq_printf(m, "scm_fds: %u\n", nr_fds);
}
}
#else
#define unix_show_fdinfo NULL
#endif
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_stream_connect,
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
.poll = unix_poll,
.ioctl = unix_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = unix_compat_ioctl,
#endif
.listen = unix_listen,
.shutdown = unix_shutdown,
.sendmsg = unix_stream_sendmsg,
.recvmsg = unix_stream_recvmsg,
.read_skb = unix_stream_read_skb,
.mmap = sock_no_mmap,
.splice_read = unix_stream_splice_read,
.set_peek_off = unix_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
};
static const struct proto_ops unix_dgram_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_dgram_connect,
.socketpair = unix_socketpair,
.accept = sock_no_accept,
.getname = unix_getname,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = unix_compat_ioctl,
#endif
.listen = sock_no_listen,
.shutdown = unix_shutdown,
.sendmsg = unix_dgram_sendmsg,
.read_skb = unix_read_skb,
.recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
.set_peek_off = unix_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
};
static const struct proto_ops unix_seqpacket_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_stream_connect,
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = unix_compat_ioctl,
#endif
.listen = unix_listen,
.shutdown = unix_shutdown,
.sendmsg = unix_seqpacket_sendmsg,
.recvmsg = unix_seqpacket_recvmsg,
.mmap = sock_no_mmap,
.set_peek_off = unix_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
};
static void unix_close(struct sock *sk, long timeout)
{
/* Nothing to do here, unix socket does not need a ->close().
* This is merely for sockmap.
*/
}
static void unix_unhash(struct sock *sk)
{
/* Nothing to do here, unix socket does not need a ->unhash().
* This is merely for sockmap.
*/
}
static bool unix_bpf_bypass_getsockopt(int level, int optname)
{
if (level == SOL_SOCKET) {
switch (optname) {
case SO_PEERPIDFD:
return true;
default:
return false;
}
}
return false;
}
struct proto unix_dgram_proto = {
.name = "UNIX",
.owner = THIS_MODULE,
.obj_size = sizeof(struct unix_sock),
.close = unix_close,
.bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = unix_dgram_bpf_update_proto,
#endif
};
struct proto unix_stream_proto = {
.name = "UNIX-STREAM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct unix_sock),
.close = unix_close,
.unhash = unix_unhash,
.bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = unix_stream_bpf_update_proto,
#endif
};
static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
struct unix_sock *u;
struct sock *sk;
int err;
atomic_long_inc(&unix_nr_socks);
if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
err = -ENFILE;
goto err;
}
if (type == SOCK_STREAM)
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
else /*dgram and seqpacket */
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
if (!sk) {
err = -ENOMEM;
goto err;
}
sock_init_data(sock, sk);
sk->sk_hash = unix_unbound_hash(sk);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk);
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link);
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
init_waitqueue_head(&u->peer_wait);
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
memset(&u->scm_stat, 0, sizeof(struct scm_stat));
unix_insert_unbound_socket(net, sk);
sock_prot_inuse_add(net, sk->sk_prot, 1);
return sk;
err:
atomic_long_dec(&unix_nr_socks);
return ERR_PTR(err);
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
if (protocol && protocol != PF_UNIX)
return -EPROTONOSUPPORT;
sock->state = SS_UNCONNECTED;
switch (sock->type) {
case SOCK_STREAM:
sock->ops = &unix_stream_ops;
break;
/*
 * Believe it or not, BSD has AF_UNIX SOCK_RAW, though
 * nothing uses it.
 */
case SOCK_RAW:
sock->type = SOCK_DGRAM;
fallthrough;
case SOCK_DGRAM:
sock->ops = &unix_dgram_ops;
break;
case SOCK_SEQPACKET:
sock->ops = &unix_seqpacket_ops;
break;
default:
return -ESOCKTNOSUPPORT;
}
sk = unix_create1(net, sock, kern, sock->type);
if (IS_ERR(sk))
return PTR_ERR(sk);
return 0;
}
static int unix_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (!sk)
return 0;
sk->sk_prot->close(sk, 0);
unix_release_sock(sk, 0);
sock->sk = NULL;
return 0;
}
static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
int type)
{
struct inode *inode;
struct path path;
struct sock *sk;
int err;
unix_mkname_bsd(sunaddr, addr_len);
err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
err = path_permission(&path, MAY_WRITE);
if (err)
goto path_put;
err = -ECONNREFUSED;
inode = d_backing_inode(path.dentry);
if (!S_ISSOCK(inode->i_mode))
goto path_put;
sk = unix_find_socket_byinode(inode);
if (!sk)
goto path_put;
err = -EPROTOTYPE;
if (sk->sk_type == type)
touch_atime(&path);
else
goto sock_put;
path_put(&path);
return sk;
sock_put:
sock_put(sk);
path_put:
path_put(&path);
fail:
return ERR_PTR(err);
}
static struct sock *unix_find_abstract(struct net *net,
struct sockaddr_un *sunaddr,
int addr_len, int type)
{
unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
struct dentry *dentry;
struct sock *sk;
sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
if (!sk)
return ERR_PTR(-ECONNREFUSED);
dentry = unix_sk(sk)->path.dentry;
if (dentry)
touch_atime(&unix_sk(sk)->path);
return sk;
}
static struct sock *unix_find_other(struct net *net,
struct sockaddr_un *sunaddr,
int addr_len, int type)
{
struct sock *sk;
if (sunaddr->sun_path[0])
sk = unix_find_bsd(sunaddr, addr_len, type);
else
sk = unix_find_abstract(net, sunaddr, addr_len, type);
return sk;
}
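/* Bind the socket to a kernel-chosen abstract address of the form
 * "\0XXXXX" (five hex digits). Userspace can request this explicitly by
 * binding an address that carries only the family field, e.g.
 * (illustrative sketch only):
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sa_family_t));
 */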
static int unix_autobind(struct sock *sk)
{
unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
struct net *net = sock_net(sk);
struct unix_address *addr;
u32 lastnum, ordernum;
int err;
err = mutex_lock_interruptible(&u->bindlock);
if (err)
return err;
if (u->addr)
goto out;
err = -ENOMEM;
addr = kzalloc(sizeof(*addr) +
offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
if (!addr)
goto out;
addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
ordernum = get_random_u32();
lastnum = ordernum & 0xFFFFF;
retry:
ordernum = (ordernum + 1) & 0xFFFFF;
sprintf(addr->name->sun_path + 1, "%05x", ordernum);
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
unix_table_double_lock(net, old_hash, new_hash);
if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
unix_table_double_unlock(net, old_hash, new_hash);
/* __unix_find_socket_byname() may take a long time if many names
 * are already in use.
 */
cond_resched();
if (ordernum == lastnum) {
/* Give up if all names seem to be in use. */
err = -ENOSPC;
unix_release_addr(addr);
goto out;
}
goto retry;
}
__unix_set_addr_hash(net, sk, addr, new_hash);
unix_table_double_unlock(net, old_hash, new_hash);
err = 0;
out: mutex_unlock(&u->bindlock);
return err;
}
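/* Bind to a filesystem (pathname) address: create the socket inode with
 * vfs_mknod() in the parent directory, then hash the socket by that inode
 * and add it to the bsd_socket table used for connect()-by-path lookups.
 */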
static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
int addr_len)
{
umode_t mode = S_IFSOCK |
(SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
struct net *net = sock_net(sk);
struct mnt_idmap *idmap;
struct unix_address *addr;
struct dentry *dentry;
struct path parent;
int err;
addr_len = unix_mkname_bsd(sunaddr, addr_len);
addr = unix_create_addr(sunaddr, addr_len);
if (!addr)
return -ENOMEM;
/*
 * Get the parent directory and calculate the hash for the
 * last component.
*/
dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto out;
}
/*
* All right, let's create it.
*/
idmap = mnt_idmap(parent.mnt);
err = security_path_mknod(&parent, dentry, mode, 0);
if (!err)
err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
if (err)
goto out_path;
err = mutex_lock_interruptible(&u->bindlock);
if (err)
goto out_unlink;
if (u->addr)
goto out_unlock;
new_hash = unix_bsd_hash(d_backing_inode(dentry));
unix_table_double_lock(net, old_hash, new_hash);
u->path.mnt = mntget(parent.mnt);
u->path.dentry = dget(dentry);
__unix_set_addr_hash(net, sk, addr, new_hash);
unix_table_double_unlock(net, old_hash, new_hash);
unix_insert_bsd_socket(sk);
mutex_unlock(&u->bindlock);
done_path_create(&parent, dentry);
return 0;
out_unlock:
mutex_unlock(&u->bindlock);
err = -EINVAL;
out_unlink:
/* failed after successful mknod? unlink what we'd created... */
vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
out_path:
done_path_create(&parent, dentry);
out:
unix_release_addr(addr);
return err == -EEXIST ? -EADDRINUSE : err;
}
static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
int addr_len)
{
unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
struct net *net = sock_net(sk);
struct unix_address *addr;
int err;
addr = unix_create_addr(sunaddr, addr_len);
if (!addr)
return -ENOMEM;
err = mutex_lock_interruptible(&u->bindlock);
if (err)
goto out;
if (u->addr) {
err = -EINVAL;
goto out_mutex;
}
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
unix_table_double_lock(net, old_hash, new_hash);
if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
goto out_spin;
__unix_set_addr_hash(net, sk, addr, new_hash);
unix_table_double_unlock(net, old_hash, new_hash);
mutex_unlock(&u->bindlock);
return 0;
out_spin:
unix_table_double_unlock(net, old_hash, new_hash);
err = -EADDRINUSE;
out_mutex:
mutex_unlock(&u->bindlock);
out:
unix_release_addr(addr);
return err;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
struct sock *sk = sock->sk;
int err;
if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
sunaddr->sun_family == AF_UNIX)
return unix_autobind(sk);
err = unix_validate_addr(sunaddr, addr_len);
if (err)
return err;
if (sunaddr->sun_path[0])
err = unix_bind_bsd(sk, sunaddr, addr_len);
else
err = unix_bind_abstract(sk, sunaddr, addr_len);
return err;
}
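/* Lock two unix sockets in a consistent (pointer) order so that concurrent
 * callers locking the same pair cannot deadlock; sk2 may be NULL.
 */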
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
if (unlikely(sk1 == sk2) || !sk2) {
unix_state_lock(sk1);
return;
}
if (sk1 < sk2) {
unix_state_lock(sk1);
unix_state_lock_nested(sk2);
} else {
unix_state_lock(sk2);
unix_state_lock_nested(sk1);
}
}
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
if (unlikely(sk1 == sk2) || !sk2) {
unix_state_unlock(sk1);
return;
}
unix_state_unlock(sk1);
unix_state_unlock(sk2);
}
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
struct sock *sk = sock->sk;
struct sock *other;
int err;
err = -EINVAL;
if (alen < offsetofend(struct sockaddr, sa_family))
goto out;
if (addr->sa_family != AF_UNSPEC) {
err = unix_validate_addr(sunaddr, alen);
if (err)
goto out;
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
!unix_sk(sk)->addr) {
err = unix_autobind(sk);
if (err)
goto out;
}
restart:
other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
if (IS_ERR(other)) {
err = PTR_ERR(other);
goto out;
}
unix_state_double_lock(sk, other);
/* Apparently VFS overslept socket death. Retry. */
if (sock_flag(other, SOCK_DEAD)) {
unix_state_double_unlock(sk, other);
sock_put(other);
goto restart;
}
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
sk->sk_state = other->sk_state = TCP_ESTABLISHED;
} else {
/*
* 1003.1g breaking connected state with AF_UNSPEC
*/
other = NULL;
unix_state_double_lock(sk, other);
}
/*
* If it was connected, reconnect.
*/
if (unix_peer(sk)) {
struct sock *old_peer = unix_peer(sk);
unix_peer(sk) = other;
if (!other)
sk->sk_state = TCP_CLOSE;
unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
unix_state_double_unlock(sk, other);
if (other != old_peer)
unix_dgram_disconnected(sk, old_peer);
sock_put(old_peer);
} else {
unix_peer(sk) = other;
unix_state_double_unlock(sk, other);
}
return 0;
out_unlock:
unix_state_double_unlock(sk, other);
sock_put(other);
out:
return err;
}
static long unix_wait_for_peer(struct sock *other, long timeo)
__releases(&unix_sk(other)->lock)
{
struct unix_sock *u = unix_sk(other);
int sched;
DEFINE_WAIT(wait);
prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
sched = !sock_flag(other, SOCK_DEAD) &&
!(other->sk_shutdown & RCV_SHUTDOWN) &&
unix_recvq_full_lockless(other);
unix_state_unlock(other);
if (sched)
timeo = schedule_timeout(timeo);
finish_wait(&u->peer_wait, &wait);
return timeo;
}
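/* Stream/seqpacket connect: allocate the embryo socket and a control skb
 * up front, find and lock the listener, then queue the skb (owned by the
 * embryo) on the listener's receive queue so that accept() can pick it up
 * via skb->sk.
 */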
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
struct unix_sock *u = unix_sk(sk), *newu, *otheru;
struct net *net = sock_net(sk);
struct sk_buff *skb = NULL;
long timeo;
int err;
int st;
err = unix_validate_addr(sunaddr, addr_len);
if (err)
goto out;
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
err = unix_autobind(sk);
if (err)
goto out;
}
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
/* First of all allocate resources.
 * If we did it after the state was locked,
 * we would have to recheck everything again in any case.
 */
/* create new sock for complete connection */
newsk = unix_create1(net, NULL, 0, sock->type);
if (IS_ERR(newsk)) {
err = PTR_ERR(newsk);
newsk = NULL;
goto out;
}
err = -ENOMEM;
/* Allocate skb for sending to listening sock */
skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
if (skb == NULL)
goto out;
restart:
/* Find listening sock. */
other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
if (IS_ERR(other)) {
err = PTR_ERR(other);
other = NULL;
goto out;
}
/* Latch state of peer */
unix_state_lock(other);
/* Apparently VFS overslept socket death. Retry. */
if (sock_flag(other, SOCK_DEAD)) {
unix_state_unlock(other);
sock_put(other);
goto restart;
}
err = -ECONNREFUSED;
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (unix_recvq_full(other)) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
sock_put(other);
goto restart;
}
/* Latch our state.
 *
 * This is a tricky place. We need to grab our state lock but cannot
 * drop the lock on the peer, which is dangerous because a deadlock is
 * possible. The connect-to-self case and simultaneous connect
 * attempts are eliminated by checking the socket state: other is
 * TCP_LISTEN, and if sk were TCP_LISTEN we would have caught that
 * before attempting to grab the lock.
 * We still have to recheck the state after our socket is locked.
 */
st = sk->sk_state;
switch (st) {
case TCP_CLOSE:
/* This is ok... continue with connect */
break;
case TCP_ESTABLISHED:
/* Socket is already connected */
err = -EISCONN;
goto out_unlock;
default:
err = -EINVAL;
goto out_unlock;
}
unix_state_lock_nested(sk);
if (sk->sk_state != st) {
unix_state_unlock(sk);
unix_state_unlock(other);
sock_put(other);
goto restart;
}
err = security_unix_stream_connect(sk, other, newsk);
if (err) {
unix_state_unlock(sk);
goto out_unlock;
}
/* The way is open! Quickly set all the necessary fields... */
sock_hold(sk);
unix_peer(newsk) = sk;
newsk->sk_state = TCP_ESTABLISHED;
newsk->sk_type = sk->sk_type;
init_peercred(newsk);
newu = unix_sk(newsk);
RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
/* copy address information from listening to new sock
*
* The contents of *(otheru->addr) and otheru->path
* are seen fully set up here, since we have found
* otheru in hash under its lock. Insertion into the
* hash chain we'd found it in had been done in an
* earlier critical area protected by the chain's lock,
* the same one where we'd set *(otheru->addr) contents,
* as well as otheru->path and otheru->addr itself.
*
* Using smp_store_release() here to set newu->addr
* is enough to make those stores, as well as stores
* to newu->path visible to anyone who gets newu->addr
 * by smp_load_acquire(). IOW, the same guarantees
* as for unix_sock instances bound in unix_bind() or
* in unix_autobind().
*/
if (otheru->path.dentry) {
path_get(&otheru->path);
newu->path = otheru->path;
}
refcount_inc(&otheru->addr->refcnt);
smp_store_release(&newu->addr, otheru->addr);
/* Set credentials */
copy_peercred(sk, other);
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
sock_hold(newsk);
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
unix_peer(sk) = newsk;
unix_state_unlock(sk);
/* take it and send info to the listening sock */
spin_lock(&other->sk_receive_queue.lock);
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
other->sk_data_ready(other);
sock_put(other);
return 0;
out_unlock:
if (other)
unix_state_unlock(other);
out:
kfree_skb(skb);
if (newsk)
unix_release_sock(newsk, 0);
if (other)
sock_put(other);
return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
struct sock *ska = socka->sk, *skb = sockb->sk;
/* Join our sockets back to back */
sock_hold(ska);
sock_hold(skb);
unix_peer(ska) = skb;
unix_peer(skb) = ska;
init_peercred(ska);
init_peercred(skb);
ska->sk_state = TCP_ESTABLISHED;
skb->sk_state = TCP_ESTABLISHED;
socka->state = SS_CONNECTED;
sockb->state = SS_CONNECTED;
return 0;
}
static void unix_sock_inherit_flags(const struct socket *old,
struct socket *new)
{
if (test_bit(SOCK_PASSCRED, &old->flags))
set_bit(SOCK_PASSCRED, &new->flags);
if (test_bit(SOCK_PASSPIDFD, &old->flags))
set_bit(SOCK_PASSPIDFD, &new->flags);
if (test_bit(SOCK_PASSSEC, &old->flags))
set_bit(SOCK_PASSSEC, &new->flags);
}
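/* accept(): dequeue one skb queued by unix_stream_connect(); the embryo
 * socket travels in skb->sk and is grafted onto the new struct socket.
 */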
static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk = sock->sk;
struct sock *tsk;
struct sk_buff *skb;
int err;
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out;
err = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
goto out;
/* If the socket state is TCP_LISTEN it cannot change (for now...),
 * so no locks are necessary.
 */
skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
&err);
if (!skb) {
/* This means receive shutdown. */
if (err == 0)
err = -EINVAL;
goto out;
}
tsk = skb->sk;
skb_free_datagram(sk, skb);
wake_up_interruptible(&unix_sk(sk)->peer_wait);
/* attach accepted sock to socket */
unix_state_lock(tsk);
newsock->state = SS_CONNECTED;
unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
return 0;
out:
return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
struct sock *sk = sock->sk;
struct unix_address *addr;
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
int err = 0;
if (peer) {
sk = unix_peer_get(sk);
err = -ENOTCONN;
if (!sk)
goto out;
err = 0;
} else {
sock_hold(sk);
}
addr = smp_load_acquire(&unix_sk(sk)->addr);
if (!addr) {
sunaddr->sun_family = AF_UNIX;
sunaddr->sun_path[0] = 0;
err = offsetof(struct sockaddr_un, sun_path);
} else {
err = addr->len;
memcpy(sunaddr, addr->name, addr->len);
}
sock_put(sk);
out:
return err;
}
static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->fp = scm_fp_dup(UNIXCB(skb).fp);
/*
* Garbage collection of unix sockets starts by selecting a set of
* candidate sockets which have reference only from being in flight
* (total_refs == inflight_refs). This condition is checked once during
* the candidate collection phase, and candidates are marked as such, so
* that non-candidates can later be ignored. While inflight_refs is
* protected by unix_gc_lock, total_refs (file count) is not, hence this
* is an instantaneous decision.
*
* Once a candidate, however, the socket must not be reinstalled into a
* file descriptor while the garbage collection is in progress.
*
* If the above conditions are met, then the directed graph of
* candidates (*) does not change while unix_gc_lock is held.
*
 * Any operation that changes the file count through file descriptors
 * (dup, close, sendmsg) does not change the graph, since candidates are
 * not installed in fds.
 *
 * Dequeuing a candidate via recvmsg would install it into an fd, but
* that takes unix_gc_lock to decrement the inflight count, so it's
* serialized with garbage collection.
*
* MSG_PEEK is special in that it does not change the inflight count,
* yet does install the socket into an fd. The following lock/unlock
* pair is to ensure serialization with garbage collection. It must be
* done between incrementing the file count and installing the file into
* an fd.
*
* If garbage collection starts after the barrier provided by the
* lock/unlock, then it will see the elevated refcount and not mark this
* as a candidate. If a garbage collection is already in progress
* before the file count was incremented, then the lock/unlock pair will
* ensure that garbage collection is finished before progressing to
* installing the fd.
*
* (*) A -> B where B is on the queue of A or B is on the queue of C
* which is on the queue of listening socket A.
*/
spin_lock(&unix_gc_lock);
spin_unlock(&unix_gc_lock);
}
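/* Stash the sender's pid/uid/gid, security label and (optionally) the
 * passed file descriptors into the skb's control block before queueing.
 */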
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
UNIXCB(skb).pid = get_pid(scm->pid);
UNIXCB(skb).uid = scm->creds.uid;
UNIXCB(skb).gid = scm->creds.gid;
UNIXCB(skb).fp = NULL;
unix_get_secdata(scm, skb);
if (scm->fp && send_fds)
err = unix_attach_fds(scm, skb);
skb->destructor = unix_destruct_scm;
return err;
}
static bool unix_passcred_enabled(const struct socket *sock,
const struct sock *other)
{
return test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags) ||
!other->sk_socket ||
test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
}
/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
*/
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
const struct sock *other)
{
if (UNIXCB(skb).pid)
return;
if (unix_passcred_enabled(sock, other)) {
UNIXCB(skb).pid = get_pid(task_tgid(current));
current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
}
}
static bool unix_skb_scm_eq(struct sk_buff *skb,
struct scm_cookie *scm)
{
return UNIXCB(skb).pid == scm->pid &&
uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
unix_secdata_eq(scm, skb);
}
static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
{
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
if (unlikely(fp && fp->count))
atomic_add(fp->count, &u->scm_stat.nr_fds);
}
static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
{
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
if (unlikely(fp && fp->count))
atomic_sub(fp->count, &u->scm_stat.nr_fds);
}
/*
* Send AF_UNIX data.
*/
static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
struct sock *sk = sock->sk, *other = NULL;
struct unix_sock *u = unix_sk(sk);
struct scm_cookie scm;
struct sk_buff *skb;
int data_len = 0;
int sk_locked;
long timeo;
int err;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
if (msg->msg_namelen) {
err = unix_validate_addr(sunaddr, msg->msg_namelen);
if (err)
goto out;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer_get(sk);
if (!other)
goto out;
}
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
err = unix_autobind(sk);
if (err)
goto out;
}
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
if (len > SKB_MAX_ALLOC) {
data_len = min_t(size_t,
len - SKB_MAX_ALLOC,
MAX_SKB_FRAGS * PAGE_SIZE);
data_len = PAGE_ALIGN(data_len);
BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
}
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
msg->msg_flags & MSG_DONTWAIT, &err,
PAGE_ALLOC_COSTLY_ORDER);
if (skb == NULL)
goto out;
err = unix_scm_to_skb(&scm, skb, true);
if (err < 0)
goto out_free;
skb_put(skb, len - data_len);
skb->data_len = data_len;
skb->len = len;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
if (err)
goto out_free;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
restart:
if (!other) {
err = -ECONNRESET;
if (sunaddr == NULL)
goto out_free;
other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
sk->sk_type);
if (IS_ERR(other)) {
err = PTR_ERR(other);
other = NULL;
goto out_free;
}
}
if (sk_filter(other, skb) < 0) {
/* Toss the packet but do not return any error to the sender */
err = len;
goto out_free;
}
sk_locked = 0;
unix_state_lock(other);
restart_locked:
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
if (unlikely(sock_flag(other, SOCK_DEAD))) {
/*
 * Check with 1003.1g - what should the
 * datagram error behaviour be here?
 */
unix_state_unlock(other);
sock_put(other);
if (!sk_locked)
unix_state_lock(sk);
err = 0;
if (sk->sk_type == SOCK_SEQPACKET) {
/* We get here only when racing with unix_release_sock(),
 * which is clearing @other. Unlike SOCK_DGRAM, never
 * change the state to TCP_CLOSE.
 */
unix_state_unlock(sk);
err = -EPIPE;
} else if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
sk->sk_state = TCP_CLOSE;
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
} else {
unix_state_unlock(sk);
}
other = NULL;
if (err)
goto out_free;
goto restart;
}
err = -EPIPE;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
}
/* other == sk && unix_peer(other) != sk if
 * - unix_peer(sk) == NULL, and the destination address is bound to sk
 * - unix_peer(sk) == sk at the time of lookup, but it disconnected before the lock
 */
if (other != sk &&
unlikely(unix_peer(other) != sk &&
unix_recvq_full_lockless(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out_free;
goto restart;
}
if (!sk_locked) {
unix_state_unlock(other);
unix_state_double_lock(sk, other);
}
if (unix_peer(sk) != other ||
unix_dgram_peer_wake_me(sk, other)) {
err = -EAGAIN;
sk_locked = 1;
goto out_unlock;
}
if (!sk_locked) {
sk_locked = 1;
goto restart_locked;
}
}
if (unlikely(sk_locked))
unix_state_unlock(sk);
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
scm_stat_add(other, skb);
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other);
sock_put(other);
scm_destroy(&scm);
return len;
out_unlock:
if (sk_locked)
unix_state_unlock(sk);
unix_state_unlock(other);
out_free:
kfree_skb(skb);
out:
if (other)
sock_put(other);
scm_destroy(&scm);
return err;
}
/* We use paged skbs for stream sockets, limiting occupancy to 32768
 * bytes, with a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
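/* Worked example (assuming 4 KiB pages, for illustration only):
 * get_order(32768) == 3, so UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768 bytes.
 */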
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
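/* Queue a single out-of-band byte: the skb is both appended to the peer's
 * receive queue and remembered as ousk->oob_skb (with an extra reference),
 * replacing any previously pending OOB skb, and SIGURG is sent to the peer.
 */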
static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
struct scm_cookie *scm, bool fds_sent)
{
struct unix_sock *ousk = unix_sk(other);
struct sk_buff *skb;
int err = 0;
skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
err = unix_scm_to_skb(scm, skb, !fds_sent);
if (err < 0) {
kfree_skb(skb);
return err;
}
skb_put(skb, 1);
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
if (err) {
kfree_skb(skb);
return err;
}
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN)) {
unix_state_unlock(other);
kfree_skb(skb);
return -EPIPE;
}
maybe_add_creds(skb, sock, other);
skb_get(skb);
if (ousk->oob_skb)
consume_skb(ousk->oob_skb);
WRITE_ONCE(ousk->oob_skb, skb);
scm_stat_add(other, skb);
skb_queue_tail(&other->sk_receive_queue, skb);
sk_send_sigurg(other);
unix_state_unlock(other);
other->sk_data_ready(other);
return err;
}
#endif
static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
struct sock *other = NULL;
int err, size;
struct sk_buff *skb;
int sent = 0;
struct scm_cookie scm;
bool fds_sent = false;
int data_len;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (len)
len--;
else
#endif
goto out_err;
}
if (msg->msg_namelen) {
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
err = -ENOTCONN;
other = unix_peer(sk);
if (!other)
goto out_err;
}
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto pipe_err;
while (sent < len) {
size = len - sent;
if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
skb = sock_alloc_send_pskb(sk, 0, 0,
msg->msg_flags & MSG_DONTWAIT,
&err, 0);
} else {
/* Keep two messages in the pipe so it schedules better */
size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
/* allow fallback to order-0 allocations */
size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
msg->msg_flags & MSG_DONTWAIT, &err,
get_order(UNIX_SKB_FRAGS_SZ));
}
if (!skb)
goto out_err;
/* Only send the fds in the first buffer */
err = unix_scm_to_skb(&scm, skb, !fds_sent);
if (err < 0) {
kfree_skb(skb);
goto out_err;
}
fds_sent = true;
if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
err = skb_splice_from_iter(skb, &msg->msg_iter, size,
sk->sk_allocation);
if (err < 0) {
kfree_skb(skb);
goto out_err;
}
size = err;
refcount_add(size, &sk->sk_wmem_alloc);
} else {
skb_put(skb, size - data_len);
skb->data_len = data_len;
skb->len = size;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
if (err) {
kfree_skb(skb);
goto out_err;
}
}
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
maybe_add_creds(skb, sock, other);
scm_stat_add(other, skb);
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other);
sent += size;
}
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (msg->msg_flags & MSG_OOB) {
err = queue_oob(sock, msg, other, &scm, fds_sent);
if (err)
goto out_err;
sent++;
}
#endif
scm_destroy(&scm);
return sent;
pipe_err_free:
unix_state_unlock(other);
kfree_skb(skb);
pipe_err:
if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
out_err:
scm_destroy(&scm);
return sent ? : err;
}
static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
int err;
struct sock *sk = sock->sk;
err = sock_error(sk);
if (err)
return err;
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
if (msg->msg_namelen)
msg->msg_namelen = 0;
return unix_dgram_sendmsg(sock, msg, len);
}
static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
return unix_dgram_recvmsg(sock, msg, size, flags);
}
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
if (addr) {
msg->msg_namelen = addr->len;
memcpy(msg->msg_name, addr->name, addr->len);
}
}
int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
int flags)
{
struct scm_cookie scm;
struct socket *sock = sk->sk_socket;
struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb, *last;
long timeo;
int skip;
int err;
err = -EOPNOTSUPP;
if (flags&MSG_OOB)
goto out;
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
mutex_lock(&u->iolock);
skip = sk_peek_offset(sk, flags);
skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
&skip, &err, &last);
if (skb) {
if (!(flags & MSG_PEEK))
scm_stat_del(sk, skb);
break;
}
mutex_unlock(&u->iolock);
if (err != -EAGAIN)
break;
} while (timeo &&
!__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
&err, &timeo, last));
if (!skb) { /* implies iolock unlocked */
unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
(sk->sk_shutdown & RCV_SHUTDOWN))
err = 0;
unix_state_unlock(sk);
goto out;
}
if (wq_has_sleeper(&u->peer_wait))
wake_up_interruptible_sync_poll(&u->peer_wait,
EPOLLOUT | EPOLLWRNORM |
EPOLLWRBAND);
if (msg->msg_name)
unix_copy_addr(msg, skb->sk);
if (size > skb->len - skip)
size = skb->len - skip;
else if (size < skb->len - skip)
msg->msg_flags |= MSG_TRUNC;
err = skb_copy_datagram_msg(skb, skip, msg, size);
if (err)
goto out_free;
if (sock_flag(sk, SOCK_RCVTSTAMP))
__sock_recv_timestamp(msg, sk, skb);
memset(&scm, 0, sizeof(scm));
scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
unix_set_secdata(&scm, skb);
if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
unix_detach_fds(&scm, skb);
sk_peek_offset_bwd(sk, skb->len);
} else {
/* It is questionable: on PEEK we could:
 *
 * - not return fds - good, but too simple 8)
 * - return fds, but not return them again on read (the old strategy,
 *   apparently wrong)
 * - clone fds (I chose this for now, it is the most universal
 *   solution)
 *
 * POSIX 1003.1g does not actually define this clearly
 * at all. POSIX 1003.1g doesn't define a lot of things
 * clearly, however!
 */
sk_peek_offset_fwd(sk, size);
if (UNIXCB(skb).fp)
unix_peek_fds(&scm, skb);
}
err = (flags & MSG_TRUNC) ? skb->len - skip : size;
scm_recv_unix(sock, msg, &scm, flags);
out_free:
skb_free_datagram(sk, skb);
mutex_unlock(&u->iolock);
out:
return err;
}
static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
#ifdef CONFIG_BPF_SYSCALL
const struct proto *prot = READ_ONCE(sk->sk_prot);
if (prot != &unix_dgram_proto)
return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
return __unix_dgram_recvmsg(sk, msg, size, flags);
}
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb;
int err;
mutex_lock(&u->iolock);
skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
mutex_unlock(&u->iolock);
if (!skb)
return err;
return recv_actor(sk, skb);
}
/*
 * Sleep until more data has arrived. But check for races.
*/
static long unix_stream_data_wait(struct sock *sk, long timeo,
struct sk_buff *last, unsigned int last_len,
bool freezable)
{
unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
struct sk_buff *tail;
DEFINE_WAIT(wait);
unix_state_lock(sk);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, state);
tail = skb_peek_tail(&sk->sk_receive_queue);
if (tail != last ||
(tail && tail->len != last_len) ||
sk->sk_err ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) ||
!timeo)
break;
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
unix_state_unlock(sk);
timeo = schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
break;
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}
finish_wait(sk_sleep(sk), &wait);
unix_state_unlock(sk);
return timeo;
}
static unsigned int unix_skb_len(const struct sk_buff *skb)
{
return skb->len - UNIXCB(skb).consumed;
}
struct unix_stream_read_state {
int (*recv_actor)(struct sk_buff *, int, int,
struct unix_stream_read_state *);
struct socket *socket;
struct msghdr *msg;
struct pipe_inode_info *pipe;
size_t size;
int flags;
unsigned int splice_flags;
};
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int unix_stream_recv_urg(struct unix_stream_read_state *state)
{
struct socket *sock = state->socket;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int chunk = 1;
struct sk_buff *oob_skb;
mutex_lock(&u->iolock);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
unix_state_unlock(sk);
mutex_unlock(&u->iolock);
return -EINVAL;
}
oob_skb = u->oob_skb;
if (!(state->flags & MSG_PEEK))
WRITE_ONCE(u->oob_skb, NULL);
unix_state_unlock(sk);
chunk = state->recv_actor(oob_skb, 0, chunk, state);
if (!(state->flags & MSG_PEEK)) {
UNIXCB(oob_skb).consumed += 1;
kfree_skb(oob_skb);
}
mutex_unlock(&u->iolock);
if (chunk < 0)
return -EFAULT;
state->msg->msg_flags |= MSG_OOB;
return 1;
}
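/* Handle the skb at the head of the receive queue when OOB data is pending:
 * drop it if it is already fully consumed, and if it is the OOB skb itself,
 * stop before it when data was already copied, consume it inline for
 * SOCK_URGINLINE, or skip past it to the next skb otherwise.
 */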
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
int flags, int copied)
{
struct unix_sock *u = unix_sk(sk);
if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
skb_unlink(skb, &sk->sk_receive_queue);
consume_skb(skb);
skb = NULL;
} else {
if (skb == u->oob_skb) {
if (copied) {
skb = NULL;
} else if (sock_flag(sk, SOCK_URGINLINE)) {
if (!(flags & MSG_PEEK)) {
WRITE_ONCE(u->oob_skb, NULL);
consume_skb(skb);
}
} else if (!(flags & MSG_PEEK)) {
skb_unlink(skb, &sk->sk_receive_queue);
consume_skb(skb);
skb = skb_peek(&sk->sk_receive_queue);
}
}
}
return skb;
}
#endif
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
if (unlikely(sk->sk_state != TCP_ESTABLISHED))
return -ENOTCONN;
return unix_read_skb(sk, recv_actor);
}
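/* Common reader for SOCK_STREAM/SOCK_SEQPACKET: recvmsg() and splice_read()
 * both funnel through here and differ only in the recv_actor callback that
 * copies each chunk out of the queued skbs.
 */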
static int unix_stream_read_generic(struct unix_stream_read_state *state,
bool freezable)
{
struct scm_cookie scm;
struct socket *sock = state->socket;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int copied = 0;
int flags = state->flags;
int noblock = flags & MSG_DONTWAIT;
bool check_creds = false;
int target;
int err = 0;
long timeo;
int skip;
size_t size = state->size;
unsigned int last_len;
if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
err = -EINVAL;
goto out;
}
if (unlikely(flags & MSG_OOB)) {
err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
err = unix_stream_recv_urg(state);
#endif
goto out;
}
target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, noblock);
memset(&scm, 0, sizeof(scm));
/* Lock the socket to prevent queue disordering
 * while we sleep copying data to the msghdr.
 */
mutex_lock(&u->iolock);
skip = max(sk_peek_offset(sk, flags), 0);
do {
int chunk;
bool drop_skb;
struct sk_buff *skb, *last;
redo:
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
err = -ECONNRESET;
goto unlock;
}
last = skb = skb_peek(&sk->sk_receive_queue);
last_len = last ? last->len : 0;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (skb) {
skb = manage_oob(skb, sk, flags, copied);
if (!skb) {
unix_state_unlock(sk);
if (copied)
break;
goto redo;
}
}
#endif
again:
if (skb == NULL) {
if (copied >= target)
goto unlock;
/*
* POSIX 1003.1g mandates this order.
*/
err = sock_error(sk);
if (err)
goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
goto unlock;
unix_state_unlock(sk);
if (!timeo) {
err = -EAGAIN;
break;
}
mutex_unlock(&u->iolock);
timeo = unix_stream_data_wait(sk, timeo, last,
last_len, freezable);
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
scm_destroy(&scm);
goto out;
}
mutex_lock(&u->iolock);
goto redo;
unlock:
unix_state_unlock(sk);
break;
}
while (skip >= unix_skb_len(skb)) {
skip -= unix_skb_len(skb);
last = skb;
last_len = skb->len;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (!skb)
goto again;
}
unix_state_unlock(sk);
if (check_creds) {
/* Never glue messages from different writers */
if (!unix_skb_scm_eq(skb, &scm))
break;
} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags)) {
/* Copy credentials */
scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
unix_set_secdata(&scm, skb);
check_creds = true;
}
/* Copy address just once */
if (state->msg && state->msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
state->msg->msg_name);
unix_copy_addr(state->msg, skb->sk);
sunaddr = NULL;
}
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
skb_get(skb);
chunk = state->recv_actor(skb, skip, chunk, state);
drop_skb = !unix_skb_len(skb);
/* skb is only safe to use if !drop_skb */
consume_skb(skb);
if (chunk < 0) {
if (copied == 0)
copied = -EFAULT;
break;
}
copied += chunk;
size -= chunk;
if (drop_skb) {
/* the skb was touched by a concurrent reader;
* we should not expect anything from this skb
* anymore and assume it invalid - we can be
* sure it was dropped from the socket queue
*
* let's report a short read
*/
err = 0;
break;
}
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
UNIXCB(skb).consumed += chunk;
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp) {
scm_stat_del(sk, skb);
unix_detach_fds(&scm, skb);
}
if (unix_skb_len(skb))
break;
skb_unlink(skb, &sk->sk_receive_queue);
consume_skb(skb);
if (scm.fp)
break;
} else {
/* It is questionable, see note in unix_dgram_recvmsg.
*/
if (UNIXCB(skb).fp)
unix_peek_fds(&scm, skb);
sk_peek_offset_fwd(sk, chunk);
if (UNIXCB(skb).fp)
break;
skip = 0;
last = skb;
last_len = skb->len;
unix_state_lock(sk);
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (skb)
goto again;
unix_state_unlock(sk);
break;
}
} while (size);
mutex_unlock(&u->iolock);
if (state->msg)
scm_recv_unix(sock, state->msg, &scm, flags);
else
scm_destroy(&scm);
out:
return copied ? : err;
}
static int unix_stream_read_actor(struct sk_buff *skb,
int skip, int chunk,
struct unix_stream_read_state *state)
{
int ret;
ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
state->msg, chunk);
return ret ?: chunk;
}
int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
size_t size, int flags)
{
struct unix_stream_read_state state = {
.recv_actor = unix_stream_read_actor,
.socket = sk->sk_socket,
.msg = msg,
.size = size,
.flags = flags
};
return unix_stream_read_generic(&state, true);
}
static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct unix_stream_read_state state = {
.recv_actor = unix_stream_read_actor,
.socket = sock,
.msg = msg,
.size = size,
.flags = flags
};
#ifdef CONFIG_BPF_SYSCALL
struct sock *sk = sock->sk;
const struct proto *prot = READ_ONCE(sk->sk_prot);
if (prot != &unix_stream_proto)
return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
return unix_stream_read_generic(&state, true);
}
static int unix_stream_splice_actor(struct sk_buff *skb,
int skip, int chunk,
struct unix_stream_read_state *state)
{
return skb_splice_bits(skb, state->socket->sk,
UNIXCB(skb).consumed + skip,
state->pipe, chunk, state->splice_flags);
}
static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t size, unsigned int flags)
{
struct unix_stream_read_state state = {
.recv_actor = unix_stream_splice_actor,
.socket = sock,
.pipe = pipe,
.size = size,
.splice_flags = flags,
};
if (unlikely(*ppos))
return -ESPIPE;
if (sock->file->f_flags & O_NONBLOCK ||
flags & SPLICE_F_NONBLOCK)
state.flags = MSG_DONTWAIT;
return unix_stream_read_generic(&state, false);
}
static int unix_shutdown(struct socket *sock, int mode)
{
struct sock *sk = sock->sk;
struct sock *other;
if (mode < SHUT_RD || mode > SHUT_RDWR)
return -EINVAL;
/* This maps:
* SHUT_RD (0) -> RCV_SHUTDOWN (1)
* SHUT_WR (1) -> SEND_SHUTDOWN (2)
* SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
*/
++mode;
unix_state_lock(sk);
WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
other = unix_peer(sk);
if (other)
sock_hold(other);
unix_state_unlock(sk);
sk->sk_state_change(sk);
if (other &&
(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
int peer_mode = 0;
const struct proto *prot = READ_ONCE(other->sk_prot);
if (prot->unhash)
prot->unhash(other);
if (mode&RCV_SHUTDOWN)
peer_mode |= SEND_SHUTDOWN;
if (mode&SEND_SHUTDOWN)
peer_mode |= RCV_SHUTDOWN;
unix_state_lock(other);
WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
unix_state_unlock(other);
other->sk_state_change(other);
if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
}
if (other)
sock_put(other);
return 0;
}
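/* SIOCINQ helper: for stream/seqpacket sockets return the total number of
 * unread bytes queued; for datagram sockets return the size of the first
 * queued datagram.
 */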
long unix_inq_len(struct sock *sk)
{
struct sk_buff *skb;
long amount = 0;
if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
spin_lock(&sk->sk_receive_queue.lock);
if (sk->sk_type == SOCK_STREAM ||
sk->sk_type == SOCK_SEQPACKET) {
skb_queue_walk(&sk->sk_receive_queue, skb)
amount += unix_skb_len(skb);
} else {
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len;
}
spin_unlock(&sk->sk_receive_queue.lock);
return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);
long unix_outq_len(struct sock *sk)
{
return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
static int unix_open_file(struct sock *sk)
{
struct path path;
struct file *f;
int fd;
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (!smp_load_acquire(&unix_sk(sk)->addr))
return -ENOENT;
path = unix_sk(sk)->path;
if (!path.dentry)
return -ENOENT;
path_get(&path);
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
goto out;
f = dentry_open(&path, O_PATH, current_cred());
if (IS_ERR(f)) {
put_unused_fd(fd);
fd = PTR_ERR(f);
goto out;
}
fd_install(fd, f);
out:
path_put(&path);
return fd;
}
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
long amount = 0;
int err;
switch (cmd) {
case SIOCOUTQ:
amount = unix_outq_len(sk);
err = put_user(amount, (int __user *)arg);
break;
case SIOCINQ:
amount = unix_inq_len(sk);
if (amount < 0)
err = amount;
else
err = put_user(amount, (int __user *)arg);
break;
case SIOCUNIXFILE:
err = unix_open_file(sk);
break;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
case SIOCATMARK:
{
struct sk_buff *skb;
int answ = 0;
skb = skb_peek(&sk->sk_receive_queue);
if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
answ = 1;
err = put_user(answ, (int __user *)arg);
}
break;
#endif
default:
err = -ENOIOCTLCMD;
break;
}
return err;
}
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
/* exceptional events? */
if (READ_ONCE(sk->sk_err))
mask |= EPOLLERR;
if (shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
if (shutdown & RCV_SHUTDOWN)
mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
/* readable? */
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
if (sk_is_readable(sk))
mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (READ_ONCE(unix_sk(sk)->oob_skb))
mask |= EPOLLPRI;
#endif
/* Connection-based need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
sk->sk_state == TCP_CLOSE)
mask |= EPOLLHUP;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
if (unix_writable(sk))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
}
static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk, *other;
unsigned int writable;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
/* exceptional events? */
if (READ_ONCE(sk->sk_err) ||
!skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (shutdown & RCV_SHUTDOWN)
mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
/* readable? */
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
if (sk_is_readable(sk))
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
if (sk->sk_type == SOCK_SEQPACKET) {
if (sk->sk_state == TCP_CLOSE)
mask |= EPOLLHUP;
/* connection hasn't started yet? */
if (sk->sk_state == TCP_SYN_SENT)
return mask;
}
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
return mask;
writable = unix_writable(sk);
if (writable) {
unix_state_lock(sk);
other = unix_peer(sk);
if (other && unix_peer(other) != sk &&
unix_recvq_full_lockless(other) &&
unix_dgram_peer_wake_me(sk, other))
writable = 0;
unix_state_unlock(sk);
}
if (writable)
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
else
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
#ifdef CONFIG_PROC_FS
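/* The seq_file position packs the hash bucket index into its upper bits and a
 * 1-based offset within that bucket into its lower bits.
 */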
#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
unsigned long offset = get_offset(*pos);
unsigned long bucket = get_bucket(*pos);
unsigned long count = 0;
struct sock *sk;
for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
sk; sk = sk_next(sk)) {
if (++count == offset)
break;
}
return sk;
}
static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
{
unsigned long bucket = get_bucket(*pos);
struct net *net = seq_file_net(seq);
struct sock *sk;
while (bucket < UNIX_HASH_SIZE) {
spin_lock(&net->unx.table.locks[bucket]);
sk = unix_from_bucket(seq, pos);
if (sk)
return sk;
spin_unlock(&net->unx.table.locks[bucket]);
*pos = set_bucket_offset(++bucket, 1);
}
return NULL;
}
static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
loff_t *pos)
{
unsigned long bucket = get_bucket(*pos);
sk = sk_next(sk);
if (sk)
return sk;
spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
*pos = set_bucket_offset(++bucket, 1);
return unix_get_first(seq, pos);
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
if (!*pos)
return SEQ_START_TOKEN;
return unix_get_first(seq, pos);
}
static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
if (v == SEQ_START_TOKEN)
return unix_get_first(seq, pos);
return unix_get_next(seq, v, pos);
}
static void unix_seq_stop(struct seq_file *seq, void *v)
{
struct sock *sk = v;
if (sk)
spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Num RefCount Protocol Flags Type St "
"Inode Path\n");
else {
struct sock *s = v;
struct unix_sock *u = unix_sk(s);
unix_state_lock(s);
seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
s,
refcount_read(&s->sk_refcnt),
0,
s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
s->sk_type,
s->sk_socket ?
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
if (u->addr) { // under a hash table lock here
int i, len;
seq_putc(seq, ' ');
i = 0;
len = u->addr->len -
offsetof(struct sockaddr_un, sun_path);
if (u->addr->name->sun_path[0]) {
len--;
} else {
seq_putc(seq, '@');
i++;
}
for ( ; i < len; i++)
seq_putc(seq, u->addr->name->sun_path[i] ?:
'@');
}
unix_state_unlock(s);
seq_putc(seq, '\n');
}
return 0;
}
static const struct seq_operations unix_seq_ops = {
.start = unix_seq_start,
.next = unix_seq_next,
.stop = unix_seq_stop,
.show = unix_seq_show,
};
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
struct bpf_unix_iter_state {
struct seq_net_private p;
unsigned int cur_sk;
unsigned int end_sk;
unsigned int max_sk;
struct sock **batch;
bool st_bucket_done;
};
struct bpf_iter__unix {
__bpf_md_ptr(struct bpf_iter_meta *, meta);
__bpf_md_ptr(struct unix_sock *, unix_sk);
uid_t uid __aligned(8);
};
static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
struct unix_sock *unix_sk, uid_t uid)
{
struct bpf_iter__unix ctx;
meta->seq_num--; /* skip SEQ_START_TOKEN */
ctx.meta = meta;
ctx.unix_sk = unix_sk;
ctx.uid = uid;
return bpf_iter_run_prog(prog, &ctx);
}
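/* Take a reference on up to iter->max_sk sockets starting at start_sk and
 * return how many sockets remain in the bucket, so the caller can detect an
 * undersized batch and retry with a larger one.
 */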
static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
{
struct bpf_unix_iter_state *iter = seq->private;
unsigned int expected = 1;
struct sock *sk;
sock_hold(start_sk);
iter->batch[iter->end_sk++] = start_sk;
for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
iter->batch[iter->end_sk++] = sk;
}
expected++;
}
spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
return expected;
}
static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
{
while (iter->cur_sk < iter->end_sk)
sock_put(iter->batch[iter->cur_sk++]);
}
static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
unsigned int new_batch_sz)
{
struct sock **new_batch;
new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
GFP_USER | __GFP_NOWARN);
if (!new_batch)
return -ENOMEM;
bpf_iter_unix_put_batch(iter);
kvfree(iter->batch);
iter->batch = new_batch;
iter->max_sk = new_batch_sz;
return 0;
}
static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
loff_t *pos)
{
struct bpf_unix_iter_state *iter = seq->private;
unsigned int expected;
bool resized = false;
struct sock *sk;
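	/* If the whole previous bucket was batched, advance to the next one. */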
if (iter->st_bucket_done)
*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
again:
/* Get a new batch */
iter->cur_sk = 0;
iter->end_sk = 0;
sk = unix_get_first(seq, pos);
if (!sk)
return NULL; /* Done */
expected = bpf_iter_unix_hold_batch(seq, sk);
if (iter->end_sk == expected) {
iter->st_bucket_done = true;
return sk;
}
if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
resized = true;
goto again;
}
return sk;
}
static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
{
if (!*pos)
return SEQ_START_TOKEN;
	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
return bpf_iter_unix_batch(seq, pos);
}
static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bpf_unix_iter_state *iter = seq->private;
struct sock *sk;
/* Whenever seq_next() is called, the iter->cur_sk is
* done with seq_show(), so advance to the next sk in
* the batch.
*/
if (iter->cur_sk < iter->end_sk)
sock_put(iter->batch[iter->cur_sk++]);
++*pos;
if (iter->cur_sk < iter->end_sk)
sk = iter->batch[iter->cur_sk];
else
sk = bpf_iter_unix_batch(seq, pos);
return sk;
}
static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
{
struct bpf_iter_meta meta;
struct bpf_prog *prog;
struct sock *sk = v;
uid_t uid;
bool slow;
int ret;
if (v == SEQ_START_TOKEN)
return 0;
slow = lock_sock_fast(sk);
if (unlikely(sk_unhashed(sk))) {
ret = SEQ_SKIP;
goto unlock;
}
uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
meta.seq = seq;
prog = bpf_iter_get_info(&meta, false);
ret = unix_prog_seq_show(prog, &meta, v, uid);
unlock:
unlock_sock_fast(sk, slow);
return ret;
}
static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
struct bpf_unix_iter_state *iter = seq->private;
struct bpf_iter_meta meta;
struct bpf_prog *prog;
if (!v) {
meta.seq = seq;
prog = bpf_iter_get_info(&meta, true);
if (prog)
(void)unix_prog_seq_show(prog, &meta, v, 0);
}
if (iter->cur_sk < iter->end_sk)
bpf_iter_unix_put_batch(iter);
}
static const struct seq_operations bpf_iter_unix_seq_ops = {
.start = bpf_iter_unix_seq_start,
.next = bpf_iter_unix_seq_next,
.stop = bpf_iter_unix_seq_stop,
.show = bpf_iter_unix_seq_show,
};
#endif
#endif
static const struct net_proto_family unix_family_ops = {
.family = PF_UNIX,
.create = unix_create,
.owner = THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
int i;
net->unx.sysctl_max_dgram_qlen = 10;
if (unix_sysctl_register(net))
goto out;
#ifdef CONFIG_PROC_FS
if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
sizeof(struct seq_net_private)))
goto err_sysctl;
#endif
net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
sizeof(spinlock_t), GFP_KERNEL);
if (!net->unx.table.locks)
goto err_proc;
net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
sizeof(struct hlist_head),
GFP_KERNEL);
if (!net->unx.table.buckets)
goto free_locks;
for (i = 0; i < UNIX_HASH_SIZE; i++) {
spin_lock_init(&net->unx.table.locks[i]);
INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
}
return 0;
free_locks:
kvfree(net->unx.table.locks);
err_proc:
#ifdef CONFIG_PROC_FS
remove_proc_entry("unix", net->proc_net);
err_sysctl:
#endif
unix_sysctl_unregister(net);
out:
return -ENOMEM;
}
static void __net_exit unix_net_exit(struct net *net)
{
kvfree(net->unx.table.buckets);
kvfree(net->unx.table.locks);
unix_sysctl_unregister(net);
remove_proc_entry("unix", net->proc_net);
}
static struct pernet_operations unix_net_ops = {
.init = unix_net_init,
.exit = unix_net_exit,
};
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
struct unix_sock *unix_sk, uid_t uid)
#define INIT_BATCH_SZ 16
static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
{
struct bpf_unix_iter_state *iter = priv_data;
int err;
err = bpf_iter_init_seq_net(priv_data, aux);
if (err)
return err;
err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
if (err) {
bpf_iter_fini_seq_net(priv_data);
return err;
}
return 0;
}
static void bpf_iter_fini_unix(void *priv_data)
{
struct bpf_unix_iter_state *iter = priv_data;
bpf_iter_fini_seq_net(priv_data);
kvfree(iter->batch);
}
static const struct bpf_iter_seq_info unix_seq_info = {
.seq_ops = &bpf_iter_unix_seq_ops,
.init_seq_private = bpf_iter_init_unix,
.fini_seq_private = bpf_iter_fini_unix,
.seq_priv_size = sizeof(struct bpf_unix_iter_state),
};
static const struct bpf_func_proto *
bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_setsockopt:
return &bpf_sk_setsockopt_proto;
case BPF_FUNC_getsockopt:
return &bpf_sk_getsockopt_proto;
default:
return NULL;
}
}
static struct bpf_iter_reg unix_reg_info = {
.target = "unix",
.ctx_arg_info_size = 1,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__unix, unix_sk),
PTR_TO_BTF_ID_OR_NULL },
},
.get_func_proto = bpf_iter_unix_get_func_proto,
.seq_info = &unix_seq_info,
};
static void __init bpf_iter_register(void)
{
unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
if (bpf_iter_reg_target(&unix_reg_info))
pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif
static int __init af_unix_init(void)
{
int i, rc = -1;
BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
spin_lock_init(&bsd_socket_locks[i]);
INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
}
rc = proto_register(&unix_dgram_proto, 1);
if (rc != 0) {
pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
goto out;
}
rc = proto_register(&unix_stream_proto, 1);
if (rc != 0) {
pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
proto_unregister(&unix_dgram_proto);
goto out;
}
sock_register(&unix_family_ops);
register_pernet_subsys(&unix_net_ops);
unix_bpf_build_proto();
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
bpf_iter_register();
#endif
out:
return rc;
}
static void __exit af_unix_exit(void)
{
sock_unregister(PF_UNIX);
proto_unregister(&unix_dgram_proto);
proto_unregister(&unix_stream_proto);
unregister_pernet_subsys(&unix_net_ops);
}
/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket. But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);
| linux-master | net/unix/af_unix.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NET4: Sysctl interface to net af_unix subsystem.
*
* Authors: Mike Shaver.
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <net/af_unix.h>
static struct ctl_table unix_table[] = {
{
.procname = "max_dgram_qlen",
.data = &init_net.unx.sysctl_max_dgram_qlen,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{ }
};
int __net_init unix_sysctl_register(struct net *net)
{
struct ctl_table *table;
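	/* init_net uses the static table directly; other namespaces get a copy
	 * whose data pointer is redirected at their own sysctl_max_dgram_qlen.
	 */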
if (net_eq(net, &init_net)) {
table = unix_table;
} else {
table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
if (!table)
goto err_alloc;
table[0].data = &net->unx.sysctl_max_dgram_qlen;
}
net->unx.ctl = register_net_sysctl_sz(net, "net/unix", table,
ARRAY_SIZE(unix_table));
if (net->unx.ctl == NULL)
goto err_reg;
return 0;
err_reg:
if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
}
void unix_sysctl_unregister(struct net *net)
{
struct ctl_table *table;
table = net->unx.ctl->ctl_table_arg;
unregister_net_sysctl_table(net->unx.ctl);
if (!net_eq(net, &init_net))
kfree(table);
}
| linux-master | net/unix/sysctl_net_unix.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, Sony Mobile Communications Inc.
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <net/sock.h>
#include "qrtr.h"
#define QRTR_PROTO_VER_1 1
#define QRTR_PROTO_VER_2 3
/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
#define QRTR_EPH_PORT_RANGE \
XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET)
#define QRTR_PORT_CTRL_LEGACY 0xffff
/**
* struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
* @version: protocol version
* @type: packet type; one of QRTR_TYPE_*
* @src_node_id: source node
* @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
* @size: length of packet, excluding this header
* @dst_node_id: destination node
* @dst_port_id: destination port
*/
struct qrtr_hdr_v1 {
__le32 version;
__le32 type;
__le32 src_node_id;
__le32 src_port_id;
__le32 confirm_rx;
__le32 size;
__le32 dst_node_id;
__le32 dst_port_id;
} __packed;
/**
* struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
* @version: protocol version
* @type: packet type; one of QRTR_TYPE_*
* @flags: bitmask of QRTR_FLAGS_*
* @optlen: length of optional header data
* @size: length of packet, excluding this header and optlen
* @src_node_id: source node
* @src_port_id: source port
* @dst_node_id: destination node
* @dst_port_id: destination port
*/
struct qrtr_hdr_v2 {
u8 version;
u8 type;
u8 flags;
u8 optlen;
__le32 size;
__le16 src_node_id;
__le16 src_port_id;
__le16 dst_node_id;
__le16 dst_port_id;
} __packed;
#define QRTR_FLAGS_CONFIRM_RX BIT(0)
struct qrtr_cb {
u32 src_node;
u32 src_port;
u32 dst_node;
u32 dst_port;
u8 type;
u8 confirm_rx;
};
#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
sizeof(struct qrtr_hdr_v2))
struct qrtr_sock {
/* WARNING: sk must be the first member */
struct sock sk;
struct sockaddr_qrtr us;
struct sockaddr_qrtr peer;
};
static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
return container_of(sk, struct qrtr_sock, sk);
}
static unsigned int qrtr_local_nid = 1;
/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
static DEFINE_SPINLOCK(qrtr_nodes_lock);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);
/* local port allocation management */
static DEFINE_XARRAY_ALLOC(qrtr_ports);
/**
* struct qrtr_node - endpoint node
* @ep_lock: lock for endpoint management and callbacks
* @ep: endpoint
* @ref: reference count for node
* @nid: node id
* @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
* @qrtr_tx_lock: lock for qrtr_tx_flow inserts
* @rx_queue: receive queue
* @item: list item for broadcast list
*/
struct qrtr_node {
struct mutex ep_lock;
struct qrtr_endpoint *ep;
struct kref ref;
unsigned int nid;
struct radix_tree_root qrtr_tx_flow;
struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */
struct sk_buff_head rx_queue;
struct list_head item;
};
/**
* struct qrtr_tx_flow - tx flow control
* @resume_tx: waiters for a resume tx from the remote
* @pending: number of waiting senders
* @tx_failed: indicates that a message with confirm_rx flag was lost
*/
struct qrtr_tx_flow {
struct wait_queue_head resume_tx;
int pending;
int tx_failed;
};
#define QRTR_TX_FLOW_HIGH 10
#define QRTR_TX_FLOW_LOW 5
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
int type, struct sockaddr_qrtr *from,
struct sockaddr_qrtr *to);
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
int type, struct sockaddr_qrtr *from,
struct sockaddr_qrtr *to);
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);
/* Release node resources and free the node.
*
* Do not call directly, use qrtr_node_release. To be used with
* kref_put_mutex. As such, the node mutex is expected to be locked on call.
*/
static void __qrtr_node_release(struct kref *kref)
{
struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
struct radix_tree_iter iter;
struct qrtr_tx_flow *flow;
unsigned long flags;
void __rcu **slot;
spin_lock_irqsave(&qrtr_nodes_lock, flags);
/* If the node is a bridge for other nodes, there are possibly
* multiple entries pointing to our released node, delete them all.
*/
radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
if (*slot == node)
radix_tree_iter_delete(&qrtr_nodes, &iter, slot);
}
spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
list_del(&node->item);
mutex_unlock(&qrtr_node_lock);
skb_queue_purge(&node->rx_queue);
/* Free tx flow counters */
radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
flow = *slot;
radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
kfree(flow);
}
kfree(node);
}
/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
if (node)
kref_get(&node->ref);
return node;
}
/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
if (!node)
return;
kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}
/**
* qrtr_tx_resume() - reset flow control counter
* @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
* @skb: resume_tx packet
*/
static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
{
struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)skb->data;
u64 remote_node = le32_to_cpu(pkt->client.node);
u32 remote_port = le32_to_cpu(pkt->client.port);
struct qrtr_tx_flow *flow;
unsigned long key;
key = remote_node << 32 | remote_port;
rcu_read_lock();
flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
rcu_read_unlock();
if (flow) {
spin_lock(&flow->resume_tx.lock);
flow->pending = 0;
spin_unlock(&flow->resume_tx.lock);
wake_up_interruptible_all(&flow->resume_tx);
}
consume_skb(skb);
}
/**
* qrtr_tx_wait() - flow control for outgoing packets
 * @node: qrtr_node that the packet is to be sent to
* @dest_node: node id of the destination
* @dest_port: port number of the destination
* @type: type of message
*
* The flow control scheme is based around the low and high "watermarks". When
* the low watermark is passed the confirm_rx flag is set on the outgoing
* message, which will trigger the remote to send a control message of the type
 * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit,
 * further transmission should be paused.
*
 * Return: 1 if confirm_rx should be set, 0 otherwise, or a negative errno on
 * failure
*/
static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
int type)
{
unsigned long key = (u64)dest_node << 32 | dest_port;
struct qrtr_tx_flow *flow;
int confirm_rx = 0;
int ret;
/* Never set confirm_rx on non-data packets */
if (type != QRTR_TYPE_DATA)
return 0;
mutex_lock(&node->qrtr_tx_lock);
flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
if (!flow) {
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (flow) {
init_waitqueue_head(&flow->resume_tx);
if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
kfree(flow);
flow = NULL;
}
}
}
mutex_unlock(&node->qrtr_tx_lock);
	/* Set confirm_rx if we were unable to find and allocate a flow */
if (!flow)
return 1;
spin_lock_irq(&flow->resume_tx.lock);
ret = wait_event_interruptible_locked_irq(flow->resume_tx,
flow->pending < QRTR_TX_FLOW_HIGH ||
flow->tx_failed ||
!node->ep);
if (ret < 0) {
confirm_rx = ret;
} else if (!node->ep) {
confirm_rx = -EPIPE;
} else if (flow->tx_failed) {
flow->tx_failed = 0;
confirm_rx = 1;
} else {
flow->pending++;
confirm_rx = flow->pending == QRTR_TX_FLOW_LOW;
}
spin_unlock_irq(&flow->resume_tx.lock);
return confirm_rx;
}
/**
* qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed
 * @node: qrtr_node that the packet is to be sent to
* @dest_node: node id of the destination
* @dest_port: port number of the destination
*
* Signal that the transmission of a message with confirm_rx flag failed. The
* flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH,
* at which point transmission would stall forever waiting for the resume TX
* message associated with the dropped confirm_rx message.
 * Work around this by marking the flow as having a failed transmission and
 * causing the next transmission attempt to be sent with the confirm_rx flag set.
*/
static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
int dest_port)
{
unsigned long key = (u64)dest_node << 32 | dest_port;
struct qrtr_tx_flow *flow;
rcu_read_lock();
flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
rcu_read_unlock();
if (flow) {
spin_lock_irq(&flow->resume_tx.lock);
flow->tx_failed = 1;
spin_unlock_irq(&flow->resume_tx.lock);
}
}
/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
int type, struct sockaddr_qrtr *from,
struct sockaddr_qrtr *to)
{
struct qrtr_hdr_v1 *hdr;
size_t len = skb->len;
int rc, confirm_rx;
confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
if (confirm_rx < 0) {
kfree_skb(skb);
return confirm_rx;
}
hdr = skb_push(skb, sizeof(*hdr));
hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
hdr->type = cpu_to_le32(type);
hdr->src_node_id = cpu_to_le32(from->sq_node);
hdr->src_port_id = cpu_to_le32(from->sq_port);
if (to->sq_port == QRTR_PORT_CTRL) {
hdr->dst_node_id = cpu_to_le32(node->nid);
hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
} else {
hdr->dst_node_id = cpu_to_le32(to->sq_node);
hdr->dst_port_id = cpu_to_le32(to->sq_port);
}
hdr->size = cpu_to_le32(len);
hdr->confirm_rx = !!confirm_rx;
rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
if (!rc) {
mutex_lock(&node->ep_lock);
rc = -ENODEV;
if (node->ep)
rc = node->ep->xmit(node->ep, skb);
else
kfree_skb(skb);
mutex_unlock(&node->ep_lock);
}
	/* Need to ensure that a subsequent message carries the otherwise lost
	 * confirm_rx flag if we dropped this one.
	 */
if (rc && confirm_rx)
qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);
return rc;
}
/* Lookup node by id.
*
* callers must release with qrtr_node_release()
*/
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
struct qrtr_node *node;
unsigned long flags;
mutex_lock(&qrtr_node_lock);
spin_lock_irqsave(&qrtr_nodes_lock, flags);
node = radix_tree_lookup(&qrtr_nodes, nid);
node = qrtr_node_acquire(node);
spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
mutex_unlock(&qrtr_node_lock);
return node;
}
/* Assign node id to node.
*
* This is mostly useful for automatic node id assignment, based on
* the source id in the incoming packet.
*/
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
unsigned long flags;
if (nid == QRTR_EP_NID_AUTO)
return;
spin_lock_irqsave(&qrtr_nodes_lock, flags);
radix_tree_insert(&qrtr_nodes, nid, node);
if (node->nid == QRTR_EP_NID_AUTO)
node->nid = nid;
spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
}
/**
* qrtr_endpoint_post() - post incoming data
* @ep: endpoint handle
* @data: data pointer
* @len: size of data in bytes
*
* Return: 0 on success; negative error code on failure
*/
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
struct qrtr_node *node = ep->node;
const struct qrtr_hdr_v1 *v1;
const struct qrtr_hdr_v2 *v2;
struct qrtr_sock *ipc;
struct sk_buff *skb;
struct qrtr_cb *cb;
size_t size;
unsigned int ver;
size_t hdrlen;
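	/* The wire format requires a non-zero, 4-byte aligned packet length */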
if (len == 0 || len & 3)
return -EINVAL;
skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
if (!skb)
return -ENOMEM;
cb = (struct qrtr_cb *)skb->cb;
/* Version field in v1 is little endian, so this works for both cases */
	ver = *(u8 *)data;
switch (ver) {
case QRTR_PROTO_VER_1:
if (len < sizeof(*v1))
goto err;
v1 = data;
hdrlen = sizeof(*v1);
cb->type = le32_to_cpu(v1->type);
cb->src_node = le32_to_cpu(v1->src_node_id);
cb->src_port = le32_to_cpu(v1->src_port_id);
cb->confirm_rx = !!v1->confirm_rx;
cb->dst_node = le32_to_cpu(v1->dst_node_id);
cb->dst_port = le32_to_cpu(v1->dst_port_id);
size = le32_to_cpu(v1->size);
break;
case QRTR_PROTO_VER_2:
if (len < sizeof(*v2))
goto err;
v2 = data;
hdrlen = sizeof(*v2) + v2->optlen;
cb->type = v2->type;
cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
cb->src_node = le16_to_cpu(v2->src_node_id);
cb->src_port = le16_to_cpu(v2->src_port_id);
cb->dst_node = le16_to_cpu(v2->dst_node_id);
cb->dst_port = le16_to_cpu(v2->dst_port_id);
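		/* v2 headers carry 16-bit port ids, which truncate the 32-bit
		 * control port; widen it back to QRTR_PORT_CTRL here.
		 */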
if (cb->src_port == (u16)QRTR_PORT_CTRL)
cb->src_port = QRTR_PORT_CTRL;
if (cb->dst_port == (u16)QRTR_PORT_CTRL)
cb->dst_port = QRTR_PORT_CTRL;
size = le32_to_cpu(v2->size);
break;
default:
pr_err("qrtr: Invalid version %d\n", ver);
goto err;
}
if (cb->dst_port == QRTR_PORT_CTRL_LEGACY)
cb->dst_port = QRTR_PORT_CTRL;
if (!size || len != ALIGN(size, 4) + hdrlen)
goto err;
if ((cb->type == QRTR_TYPE_NEW_SERVER ||
cb->type == QRTR_TYPE_RESUME_TX) &&
size < sizeof(struct qrtr_ctrl_pkt))
goto err;
if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
cb->type != QRTR_TYPE_RESUME_TX)
goto err;
skb_put_data(skb, data + hdrlen, size);
qrtr_node_assign(node, cb->src_node);
if (cb->type == QRTR_TYPE_NEW_SERVER) {
/* Remote node endpoint can bridge other distant nodes */
const struct qrtr_ctrl_pkt *pkt;
pkt = data + hdrlen;
qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
}
if (cb->type == QRTR_TYPE_RESUME_TX) {
qrtr_tx_resume(node, skb);
} else {
ipc = qrtr_port_lookup(cb->dst_port);
if (!ipc)
goto err;
if (sock_queue_rcv_skb(&ipc->sk, skb)) {
qrtr_port_put(ipc);
goto err;
}
qrtr_port_put(ipc);
}
return 0;
err:
kfree_skb(skb);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
/**
* qrtr_alloc_ctrl_packet() - allocate control packet skb
* @pkt: reference to qrtr_ctrl_pkt pointer
* @flags: the type of memory to allocate
*
* Returns newly allocated sk_buff, or NULL on failure
*
* This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
* on success returns a reference to the control packet in @pkt.
*/
static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt,
gfp_t flags)
{
const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
struct sk_buff *skb;
skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, flags);
if (!skb)
return NULL;
skb_reserve(skb, QRTR_HDR_MAX_SIZE);
*pkt = skb_put_zero(skb, pkt_len);
return skb;
}
/**
* qrtr_endpoint_register() - register a new endpoint
* @ep: endpoint to register
* @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
* Return: 0 on success; negative error code on failure
*
* The specified endpoint must have the xmit function pointer set on call.
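 *
 * A minimal transport driver is expected to do roughly the following (sketch;
 * my_xmit() is a hypothetical driver callback, see the SMD/MHI glue drivers
 * for complete examples):
 *
 *	ep->xmit = my_xmit;
 *	ret = qrtr_endpoint_register(ep, QRTR_EP_NID_AUTO);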
*/
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
struct qrtr_node *node;
if (!ep || !ep->xmit)
return -EINVAL;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
kref_init(&node->ref);
mutex_init(&node->ep_lock);
skb_queue_head_init(&node->rx_queue);
node->nid = QRTR_EP_NID_AUTO;
node->ep = ep;
INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
mutex_init(&node->qrtr_tx_lock);
qrtr_node_assign(node, nid);
mutex_lock(&qrtr_node_lock);
list_add(&node->item, &qrtr_all_nodes);
mutex_unlock(&qrtr_node_lock);
ep->node = node;
return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
/**
* qrtr_endpoint_unregister - unregister endpoint
* @ep: endpoint to unregister
*/
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
struct qrtr_node *node = ep->node;
struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
struct radix_tree_iter iter;
struct qrtr_ctrl_pkt *pkt;
struct qrtr_tx_flow *flow;
struct sk_buff *skb;
unsigned long flags;
void __rcu **slot;
mutex_lock(&node->ep_lock);
node->ep = NULL;
mutex_unlock(&node->ep_lock);
/* Notify the local controller about the event */
spin_lock_irqsave(&qrtr_nodes_lock, flags);
radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
if (*slot != node)
continue;
src.sq_node = iter.index;
skb = qrtr_alloc_ctrl_packet(&pkt, GFP_ATOMIC);
if (skb) {
pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
}
}
spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
/* Wake up any transmitters waiting for resume-tx from the node */
mutex_lock(&node->qrtr_tx_lock);
radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
flow = *slot;
wake_up_interruptible_all(&flow->resume_tx);
}
mutex_unlock(&node->qrtr_tx_lock);
qrtr_node_release(node);
ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
/* Lookup socket by port.
*
* Callers must release with qrtr_port_put()
*/
static struct qrtr_sock *qrtr_port_lookup(int port)
{
struct qrtr_sock *ipc;
if (port == QRTR_PORT_CTRL)
port = 0;
rcu_read_lock();
ipc = xa_load(&qrtr_ports, port);
if (ipc)
sock_hold(&ipc->sk);
rcu_read_unlock();
return ipc;
}
/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
sock_put(&ipc->sk);
}
/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
struct qrtr_ctrl_pkt *pkt;
struct sk_buff *skb;
int port = ipc->us.sq_port;
struct sockaddr_qrtr to;
to.sq_family = AF_QIPCRTR;
to.sq_node = QRTR_NODE_BCAST;
to.sq_port = QRTR_PORT_CTRL;
skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
if (skb) {
pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
pkt->client.node = cpu_to_le32(ipc->us.sq_node);
pkt->client.port = cpu_to_le32(ipc->us.sq_port);
skb_set_owner_w(skb, &ipc->sk);
qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
&to);
}
if (port == QRTR_PORT_CTRL)
port = 0;
__sock_put(&ipc->sk);
xa_erase(&qrtr_ports, port);
	/* Ensure that if qrtr_port_lookup() did enter the RCU read section we
	 * wait for it to finish incrementing the refcount.
	 */
synchronize_rcu();
}
/* Assign port number to socket.
*
* Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
*
* Port may be:
* 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
* <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
* >QRTR_MIN_EPH_SOCKET: Specified; available to all
*/
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
int rc;
if (!*port) {
rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_EPH_PORT_RANGE,
GFP_KERNEL);
} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
rc = -EACCES;
} else if (*port == QRTR_PORT_CTRL) {
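		/* The control port is stored at index 0 of the port xarray */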
rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL);
} else {
rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL);
}
if (rc == -EBUSY)
return -EADDRINUSE;
else if (rc < 0)
return rc;
sock_hold(&ipc->sk);
return 0;
}
/* Reset all non-control ports */
static void qrtr_reset_ports(void)
{
struct qrtr_sock *ipc;
unsigned long index;
rcu_read_lock();
xa_for_each_start(&qrtr_ports, index, ipc, 1) {
sock_hold(&ipc->sk);
ipc->sk.sk_err = ENETRESET;
sk_error_report(&ipc->sk);
sock_put(&ipc->sk);
}
rcu_read_unlock();
}
/* Bind socket to address.
*
* Socket should be locked upon call.
*/
static int __qrtr_bind(struct socket *sock,
const struct sockaddr_qrtr *addr, int zapped)
{
struct qrtr_sock *ipc = qrtr_sk(sock->sk);
struct sock *sk = sock->sk;
int port;
int rc;
/* rebinding ok */
if (!zapped && addr->sq_port == ipc->us.sq_port)
return 0;
port = addr->sq_port;
rc = qrtr_port_assign(ipc, &port);
if (rc)
return rc;
/* unbind previous, if any */
if (!zapped)
qrtr_port_remove(ipc);
ipc->us.sq_port = port;
sock_reset_flag(sk, SOCK_ZAPPED);
/* Notify all open ports about the new controller */
if (port == QRTR_PORT_CTRL)
qrtr_reset_ports();
return 0;
}
/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
struct sock *sk = sock->sk;
struct sockaddr_qrtr addr;
if (!sock_flag(sk, SOCK_ZAPPED))
return 0;
addr.sq_family = AF_QIPCRTR;
addr.sq_node = qrtr_local_nid;
addr.sq_port = 0;
return __qrtr_bind(sock, &addr, 1);
}
/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
struct qrtr_sock *ipc = qrtr_sk(sock->sk);
struct sock *sk = sock->sk;
int rc;
if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
return -EINVAL;
if (addr->sq_node != ipc->us.sq_node)
return -EINVAL;
lock_sock(sk);
rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
release_sock(sk);
return rc;
}
/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
int type, struct sockaddr_qrtr *from,
struct sockaddr_qrtr *to)
{
struct qrtr_sock *ipc;
struct qrtr_cb *cb;
ipc = qrtr_port_lookup(to->sq_port);
if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
if (ipc)
qrtr_port_put(ipc);
kfree_skb(skb);
return -ENODEV;
}
cb = (struct qrtr_cb *)skb->cb;
cb->src_node = from->sq_node;
cb->src_port = from->sq_port;
if (sock_queue_rcv_skb(&ipc->sk, skb)) {
qrtr_port_put(ipc);
kfree_skb(skb);
return -ENOSPC;
}
qrtr_port_put(ipc);
return 0;
}
/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
int type, struct sockaddr_qrtr *from,
struct sockaddr_qrtr *to)
{
struct sk_buff *skbn;
mutex_lock(&qrtr_node_lock);
list_for_each_entry(node, &qrtr_all_nodes, item) {
skbn = skb_clone(skb, GFP_KERNEL);
if (!skbn)
break;
skb_set_owner_w(skbn, skb->sk);
qrtr_node_enqueue(node, skbn, type, from, to);
}
mutex_unlock(&qrtr_node_lock);
qrtr_local_enqueue(NULL, skb, type, from, to);
return 0;
}
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
struct sockaddr_qrtr *, struct sockaddr_qrtr *);
__le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
struct qrtr_sock *ipc = qrtr_sk(sock->sk);
struct sock *sk = sock->sk;
struct qrtr_node *node;
struct sk_buff *skb;
size_t plen;
u32 type;
int rc;
if (msg->msg_flags & ~(MSG_DONTWAIT))
return -EINVAL;
if (len > 65535)
return -EMSGSIZE;
lock_sock(sk);
if (addr) {
if (msg->msg_namelen < sizeof(*addr)) {
release_sock(sk);
return -EINVAL;
}
if (addr->sq_family != AF_QIPCRTR) {
release_sock(sk);
return -EINVAL;
}
rc = qrtr_autobind(sock);
if (rc) {
release_sock(sk);
return rc;
}
} else if (sk->sk_state == TCP_ESTABLISHED) {
addr = &ipc->peer;
} else {
release_sock(sk);
return -ENOTCONN;
}
node = NULL;
if (addr->sq_node == QRTR_NODE_BCAST) {
if (addr->sq_port != QRTR_PORT_CTRL &&
qrtr_local_nid != QRTR_NODE_BCAST) {
release_sock(sk);
return -ENOTCONN;
}
enqueue_fn = qrtr_bcast_enqueue;
} else if (addr->sq_node == ipc->us.sq_node) {
enqueue_fn = qrtr_local_enqueue;
} else {
node = qrtr_node_lookup(addr->sq_node);
if (!node) {
release_sock(sk);
return -ECONNRESET;
}
enqueue_fn = qrtr_node_enqueue;
}
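	/* Reserve room for the payload padded to the 4-byte alignment used on
	 * the wire.
	 */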
plen = (len + 3) & ~3;
skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
msg->msg_flags & MSG_DONTWAIT, &rc);
if (!skb) {
rc = -ENOMEM;
goto out_node;
}
skb_reserve(skb, QRTR_HDR_MAX_SIZE);
rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc) {
kfree_skb(skb);
goto out_node;
}
if (ipc->us.sq_port == QRTR_PORT_CTRL) {
if (len < 4) {
rc = -EINVAL;
kfree_skb(skb);
goto out_node;
}
		/* control messages carry the packet type in their leading 'cmd' word */
skb_copy_bits(skb, 0, &qrtr_type, 4);
}
type = le32_to_cpu(qrtr_type);
rc = enqueue_fn(node, skb, type, &ipc->us, addr);
if (rc >= 0)
rc = len;
out_node:
qrtr_node_release(node);
release_sock(sk);
return rc;
}
static int qrtr_send_resume_tx(struct qrtr_cb *cb)
{
struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
struct qrtr_ctrl_pkt *pkt;
struct qrtr_node *node;
struct sk_buff *skb;
int ret;
node = qrtr_node_lookup(remote.sq_node);
if (!node)
return -EINVAL;
skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
if (!skb)
return -ENOMEM;
pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
pkt->client.node = cpu_to_le32(cb->dst_node);
pkt->client.port = cpu_to_le32(cb->dst_port);
ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote);
qrtr_node_release(node);
return ret;
}
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct qrtr_cb *cb;
int copied, rc;
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED)) {
release_sock(sk);
return -EADDRNOTAVAIL;
}
skb = skb_recv_datagram(sk, flags, &rc);
if (!skb) {
release_sock(sk);
return rc;
}
cb = (struct qrtr_cb *)skb->cb;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
rc = skb_copy_datagram_msg(skb, 0, msg, copied);
if (rc < 0)
goto out;
rc = copied;
if (addr) {
/* There is an anonymous 2-byte hole after sq_family,
* make sure to clear it.
*/
memset(addr, 0, sizeof(*addr));
addr->sq_family = AF_QIPCRTR;
addr->sq_node = cb->src_node;
addr->sq_port = cb->src_port;
msg->msg_namelen = sizeof(*addr);
}
out:
if (cb->confirm_rx)
qrtr_send_resume_tx(cb);
skb_free_datagram(sk, skb);
release_sock(sk);
return rc;
}
static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
int len, int flags)
{
DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
struct qrtr_sock *ipc = qrtr_sk(sock->sk);
struct sock *sk = sock->sk;
int rc;
if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
return -EINVAL;
lock_sock(sk);
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
rc = qrtr_autobind(sock);
if (rc) {
release_sock(sk);
return rc;
}
ipc->peer = *addr;
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
release_sock(sk);
return 0;
}
static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
int peer)
{
struct qrtr_sock *ipc = qrtr_sk(sock->sk);
struct sockaddr_qrtr qaddr;
struct sock *sk = sock->sk;
lock_sock(sk);
if (peer) {
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
qaddr = ipc->peer;
} else {
qaddr = ipc->us;
}
release_sock(sk);
qaddr.sq_family = AF_QIPCRTR;
memcpy(saddr, &qaddr, sizeof(qaddr));
return sizeof(qaddr);
}
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct qrtr_sock *ipc = qrtr_sk(sock->sk);
struct sock *sk = sock->sk;
struct sockaddr_qrtr *sq;
struct sk_buff *skb;
struct ifreq ifr;
long len = 0;
int rc = 0;
lock_sock(sk);
switch (cmd) {
case TIOCOUTQ:
len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (len < 0)
len = 0;
rc = put_user(len, (int __user *)argp);
break;
case TIOCINQ:
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
len = skb->len;
rc = put_user(len, (int __user *)argp);
break;
case SIOCGIFADDR:
if (get_user_ifreq(&ifr, NULL, argp)) {
rc = -EFAULT;
break;
}
sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
*sq = ipc->us;
if (put_user_ifreq(&ifr, argp)) {
rc = -EFAULT;
break;
}
break;
case SIOCADDRT:
case SIOCDELRT:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
rc = -EINVAL;
break;
default:
rc = -ENOIOCTLCMD;
break;
}
release_sock(sk);
return rc;
}
static int qrtr_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct qrtr_sock *ipc;
if (!sk)
return 0;
lock_sock(sk);
ipc = qrtr_sk(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_orphan(sk);
sock->sk = NULL;
if (!sock_flag(sk, SOCK_ZAPPED))
qrtr_port_remove(ipc);
skb_queue_purge(&sk->sk_receive_queue);
release_sock(sk);
sock_put(sk);
return 0;
}
static const struct proto_ops qrtr_proto_ops = {
.owner = THIS_MODULE,
.family = AF_QIPCRTR,
.bind = qrtr_bind,
.connect = qrtr_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.listen = sock_no_listen,
.sendmsg = qrtr_sendmsg,
.recvmsg = qrtr_recvmsg,
.getname = qrtr_getname,
.ioctl = qrtr_ioctl,
.gettstamp = sock_gettstamp,
.poll = datagram_poll,
.shutdown = sock_no_shutdown,
.release = qrtr_release,
.mmap = sock_no_mmap,
};
static struct proto qrtr_proto = {
.name = "QIPCRTR",
.owner = THIS_MODULE,
.obj_size = sizeof(struct qrtr_sock),
};
static int qrtr_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct qrtr_sock *ipc;
struct sock *sk;
if (sock->type != SOCK_DGRAM)
return -EPROTOTYPE;
sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
if (!sk)
return -ENOMEM;
sock_set_flag(sk, SOCK_ZAPPED);
sock_init_data(sock, sk);
sock->ops = &qrtr_proto_ops;
ipc = qrtr_sk(sk);
ipc->us.sq_family = AF_QIPCRTR;
ipc->us.sq_node = qrtr_local_nid;
ipc->us.sq_port = 0;
return 0;
}
static const struct net_proto_family qrtr_family = {
.owner = THIS_MODULE,
.family = AF_QIPCRTR,
.create = qrtr_create,
};
static int __init qrtr_proto_init(void)
{
int rc;
rc = proto_register(&qrtr_proto, 1);
if (rc)
return rc;
rc = sock_register(&qrtr_family);
if (rc)
goto err_proto;
rc = qrtr_ns_init();
if (rc)
goto err_sock;
return 0;
err_sock:
sock_unregister(qrtr_family.family);
err_proto:
proto_unregister(&qrtr_proto);
return rc;
}
postcore_initcall(qrtr_proto_init);
static void __exit qrtr_proto_fini(void)
{
qrtr_ns_remove();
sock_unregister(qrtr_family.family);
proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);
MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);
| linux-master | net/qrtr/af_qrtr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, Sony Mobile Communications Inc.
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/rpmsg.h>
#include "qrtr.h"
struct qrtr_smd_dev {
struct qrtr_endpoint ep;
struct rpmsg_endpoint *channel;
struct device *dev;
};
/* from smd to qrtr */
static int qcom_smd_qrtr_callback(struct rpmsg_device *rpdev,
void *data, int len, void *priv, u32 addr)
{
struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev);
int rc;
if (!qdev)
return -EAGAIN;
rc = qrtr_endpoint_post(&qdev->ep, data, len);
if (rc == -EINVAL) {
dev_err(qdev->dev, "invalid ipcrouter packet\n");
/* return 0 to let smd drop the packet */
rc = 0;
}
return rc;
}
/* from qrtr to smd */
static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
struct qrtr_smd_dev *qdev = container_of(ep, struct qrtr_smd_dev, ep);
int rc;
rc = skb_linearize(skb);
if (rc)
goto out;
rc = rpmsg_send(qdev->channel, skb->data, skb->len);
out:
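	/* kfree_skb() records a drop; consume_skb() is the successful-send path */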
if (rc)
kfree_skb(skb);
else
consume_skb(skb);
return rc;
}
static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev)
{
struct qrtr_smd_dev *qdev;
int rc;
qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL);
if (!qdev)
return -ENOMEM;
qdev->channel = rpdev->ept;
qdev->dev = &rpdev->dev;
qdev->ep.xmit = qcom_smd_qrtr_send;
rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
if (rc)
return rc;
dev_set_drvdata(&rpdev->dev, qdev);
dev_dbg(&rpdev->dev, "Qualcomm SMD QRTR driver probed\n");
return 0;
}
static void qcom_smd_qrtr_remove(struct rpmsg_device *rpdev)
{
struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev);
qrtr_endpoint_unregister(&qdev->ep);
dev_set_drvdata(&rpdev->dev, NULL);
}
static const struct rpmsg_device_id qcom_smd_qrtr_smd_match[] = {
{ "IPCRTR" },
{}
};
static struct rpmsg_driver qcom_smd_qrtr_driver = {
.probe = qcom_smd_qrtr_probe,
.remove = qcom_smd_qrtr_remove,
.callback = qcom_smd_qrtr_callback,
.id_table = qcom_smd_qrtr_smd_match,
.drv = {
.name = "qcom_smd_qrtr",
},
};
module_rpmsg_driver(qcom_smd_qrtr_driver);
MODULE_ALIAS("rpmsg:IPCRTR");
MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
MODULE_LICENSE("GPL v2");
| linux-master | net/qrtr/smd.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Linaro Ltd */
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include "qrtr.h"
struct qrtr_tun {
struct qrtr_endpoint ep;
struct sk_buff_head queue;
wait_queue_head_t readq;
};
static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
struct qrtr_tun *tun = container_of(ep, struct qrtr_tun, ep);
skb_queue_tail(&tun->queue, skb);
	/* wake up any blocking processes waiting for new data */
wake_up_interruptible(&tun->readq);
return 0;
}
static int qrtr_tun_open(struct inode *inode, struct file *filp)
{
struct qrtr_tun *tun;
int ret;
tun = kzalloc(sizeof(*tun), GFP_KERNEL);
if (!tun)
return -ENOMEM;
skb_queue_head_init(&tun->queue);
init_waitqueue_head(&tun->readq);
tun->ep.xmit = qrtr_tun_send;
filp->private_data = tun;
ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
if (ret)
goto out;
return 0;
out:
filp->private_data = NULL;
kfree(tun);
return ret;
}
static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *filp = iocb->ki_filp;
struct qrtr_tun *tun = filp->private_data;
struct sk_buff *skb;
int count;
while (!(skb = skb_dequeue(&tun->queue))) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
/* Wait until we get data or the endpoint goes away */
if (wait_event_interruptible(tun->readq,
!skb_queue_empty(&tun->queue)))
return -ERESTARTSYS;
}
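	/* A datagram larger than the supplied buffer is truncated; the
	 * remainder is discarded.
	 */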
count = min_t(size_t, iov_iter_count(to), skb->len);
if (copy_to_iter(skb->data, count, to) != count)
count = -EFAULT;
kfree_skb(skb);
return count;
}
static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct qrtr_tun *tun = filp->private_data;
size_t len = iov_iter_count(from);
ssize_t ret;
void *kbuf;
if (!len)
return -EINVAL;
if (len > KMALLOC_MAX_SIZE)
return -ENOMEM;
kbuf = kzalloc(len, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
if (!copy_from_iter_full(kbuf, len, from)) {
kfree(kbuf);
return -EFAULT;
}
ret = qrtr_endpoint_post(&tun->ep, kbuf, len);
kfree(kbuf);
return ret < 0 ? ret : len;
}
static __poll_t qrtr_tun_poll(struct file *filp, poll_table *wait)
{
struct qrtr_tun *tun = filp->private_data;
__poll_t mask = 0;
poll_wait(filp, &tun->readq, wait);
if (!skb_queue_empty(&tun->queue))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
static int qrtr_tun_release(struct inode *inode, struct file *filp)
{
struct qrtr_tun *tun = filp->private_data;
qrtr_endpoint_unregister(&tun->ep);
/* Discard all SKBs */
skb_queue_purge(&tun->queue);
kfree(tun);
return 0;
}
static const struct file_operations qrtr_tun_ops = {
.owner = THIS_MODULE,
.open = qrtr_tun_open,
.poll = qrtr_tun_poll,
.read_iter = qrtr_tun_read_iter,
.write_iter = qrtr_tun_write_iter,
.release = qrtr_tun_release,
};
static struct miscdevice qrtr_tun_miscdev = {
MISC_DYNAMIC_MINOR,
"qrtr-tun",
&qrtr_tun_ops,
};
static int __init qrtr_tun_init(void)
{
int ret;
ret = misc_register(&qrtr_tun_miscdev);
if (ret)
pr_err("failed to register Qualcomm IPC Router tun device\n");
return ret;
}
static void __exit qrtr_tun_exit(void)
{
misc_deregister(&qrtr_tun_miscdev);
}
module_init(qrtr_tun_init);
module_exit(qrtr_tun_exit);
MODULE_DESCRIPTION("Qualcomm IPC Router TUN device");
MODULE_LICENSE("GPL v2");
| linux-master | net/qrtr/tun.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include "qrtr.h"
struct qrtr_mhi_dev {
struct qrtr_endpoint ep;
struct mhi_device *mhi_dev;
struct device *dev;
};
/* From MHI to QRTR */
static void qcom_mhi_qrtr_dl_callback(struct mhi_device *mhi_dev,
struct mhi_result *mhi_res)
{
struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
int rc;
if (!qdev || mhi_res->transaction_status)
return;
rc = qrtr_endpoint_post(&qdev->ep, mhi_res->buf_addr,
mhi_res->bytes_xferd);
if (rc == -EINVAL)
dev_err(qdev->dev, "invalid ipcrouter packet\n");
}
/* From QRTR to MHI */
static void qcom_mhi_qrtr_ul_callback(struct mhi_device *mhi_dev,
struct mhi_result *mhi_res)
{
struct sk_buff *skb = mhi_res->buf_addr;
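	/* Drop the socket reference taken in qcom_mhi_qrtr_send() now that the
	 * transfer has completed.
	 */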
if (skb->sk)
sock_put(skb->sk);
consume_skb(skb);
}
/* Send data over MHI */
static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
int rc;
if (skb->sk)
sock_hold(skb->sk);
rc = skb_linearize(skb);
if (rc)
goto free_skb;
rc = mhi_queue_skb(qdev->mhi_dev, DMA_TO_DEVICE, skb, skb->len,
MHI_EOT);
if (rc)
goto free_skb;
return rc;
free_skb:
if (skb->sk)
sock_put(skb->sk);
kfree_skb(skb);
return rc;
}
static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct qrtr_mhi_dev *qdev;
int rc;
qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
if (!qdev)
return -ENOMEM;
qdev->mhi_dev = mhi_dev;
qdev->dev = &mhi_dev->dev;
qdev->ep.xmit = qcom_mhi_qrtr_send;
dev_set_drvdata(&mhi_dev->dev, qdev);
rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
if (rc)
return rc;
/* start channels */
rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
if (rc) {
qrtr_endpoint_unregister(&qdev->ep);
return rc;
}
dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
return 0;
}
static void qcom_mhi_qrtr_remove(struct mhi_device *mhi_dev)
{
struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
qrtr_endpoint_unregister(&qdev->ep);
mhi_unprepare_from_transfer(mhi_dev);
dev_set_drvdata(&mhi_dev->dev, NULL);
}
static const struct mhi_device_id qcom_mhi_qrtr_id_table[] = {
{ .chan = "IPCR" },
{}
};
MODULE_DEVICE_TABLE(mhi, qcom_mhi_qrtr_id_table);
static struct mhi_driver qcom_mhi_qrtr_driver = {
.probe = qcom_mhi_qrtr_probe,
.remove = qcom_mhi_qrtr_remove,
.dl_xfer_cb = qcom_mhi_qrtr_dl_callback,
.ul_xfer_cb = qcom_mhi_qrtr_ul_callback,
.id_table = qcom_mhi_qrtr_id_table,
.driver = {
.name = "qcom_mhi_qrtr",
},
};
module_mhi_driver(qcom_mhi_qrtr_driver);
MODULE_AUTHOR("Chris Lew <[email protected]>");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_DESCRIPTION("Qualcomm IPC-Router MHI interface driver");
MODULE_LICENSE("GPL v2");
| linux-master | net/qrtr/mhi.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (c) 2015, Sony Mobile Communications Inc.
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2020, Linaro Ltd.
*/
#include <linux/module.h>
#include <linux/qrtr.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include "qrtr.h"
#include <trace/events/sock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/qrtr.h>
static DEFINE_XARRAY(nodes);
static struct {
struct socket *sock;
struct sockaddr_qrtr bcast_sq;
struct list_head lookups;
struct workqueue_struct *workqueue;
struct work_struct work;
int local_node;
} qrtr_ns;
static const char * const qrtr_ctrl_pkt_strings[] = {
[QRTR_TYPE_HELLO] = "hello",
[QRTR_TYPE_BYE] = "bye",
[QRTR_TYPE_NEW_SERVER] = "new-server",
[QRTR_TYPE_DEL_SERVER] = "del-server",
[QRTR_TYPE_DEL_CLIENT] = "del-client",
[QRTR_TYPE_RESUME_TX] = "resume-tx",
[QRTR_TYPE_EXIT] = "exit",
[QRTR_TYPE_PING] = "ping",
[QRTR_TYPE_NEW_LOOKUP] = "new-lookup",
[QRTR_TYPE_DEL_LOOKUP] = "del-lookup",
};
struct qrtr_server_filter {
unsigned int service;
unsigned int instance;
unsigned int ifilter;
};
struct qrtr_lookup {
unsigned int service;
unsigned int instance;
struct sockaddr_qrtr sq;
struct list_head li;
};
struct qrtr_server {
unsigned int service;
unsigned int instance;
unsigned int node;
unsigned int port;
struct list_head qli;
};
struct qrtr_node {
unsigned int id;
struct xarray servers;
};
static struct qrtr_node *node_get(unsigned int node_id)
{
struct qrtr_node *node;
node = xa_load(&nodes, node_id);
if (node)
return node;
	/* If node didn't exist, allocate and insert it into the tree */
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return NULL;
node->id = node_id;
xa_init(&node->servers);
if (xa_store(&nodes, node_id, node, GFP_KERNEL)) {
kfree(node);
return NULL;
}
return node;
}
static int server_match(const struct qrtr_server *srv,
const struct qrtr_server_filter *f)
{
unsigned int ifilter = f->ifilter;
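	/* An instance filter of 0 combined with a non-zero instance requests an
	 * exact instance match.
	 */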
if (f->service != 0 && srv->service != f->service)
return 0;
if (!ifilter && f->instance)
ifilter = ~0;
return (srv->instance & ifilter) == f->instance;
}
static int service_announce_new(struct sockaddr_qrtr *dest,
struct qrtr_server *srv)
{
struct qrtr_ctrl_pkt pkt;
struct msghdr msg = { };
struct kvec iv;
trace_qrtr_ns_service_announce_new(srv->service, srv->instance,
srv->node, srv->port);
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
pkt.server.service = cpu_to_le32(srv->service);
pkt.server.instance = cpu_to_le32(srv->instance);
pkt.server.node = cpu_to_le32(srv->node);
pkt.server.port = cpu_to_le32(srv->port);
msg.msg_name = (struct sockaddr *)dest;
msg.msg_namelen = sizeof(*dest);
return kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
}
static int service_announce_del(struct sockaddr_qrtr *dest,
struct qrtr_server *srv)
{
struct qrtr_ctrl_pkt pkt;
struct msghdr msg = { };
struct kvec iv;
int ret;
trace_qrtr_ns_service_announce_del(srv->service, srv->instance,
srv->node, srv->port);
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = cpu_to_le32(QRTR_TYPE_DEL_SERVER);
pkt.server.service = cpu_to_le32(srv->service);
pkt.server.instance = cpu_to_le32(srv->instance);
pkt.server.node = cpu_to_le32(srv->node);
pkt.server.port = cpu_to_le32(srv->port);
msg.msg_name = (struct sockaddr *)dest;
msg.msg_namelen = sizeof(*dest);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0)
pr_err("failed to announce del service\n");
return ret;
}
static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv,
bool new)
{
struct qrtr_ctrl_pkt pkt;
struct msghdr msg = { };
struct kvec iv;
int ret;
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = new ? cpu_to_le32(QRTR_TYPE_NEW_SERVER) :
cpu_to_le32(QRTR_TYPE_DEL_SERVER);
if (srv) {
pkt.server.service = cpu_to_le32(srv->service);
pkt.server.instance = cpu_to_le32(srv->instance);
pkt.server.node = cpu_to_le32(srv->node);
pkt.server.port = cpu_to_le32(srv->port);
}
msg.msg_name = (struct sockaddr *)to;
msg.msg_namelen = sizeof(*to);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0)
pr_err("failed to send lookup notification\n");
}
static int announce_servers(struct sockaddr_qrtr *sq)
{
struct qrtr_server *srv;
struct qrtr_node *node;
unsigned long index;
int ret;
node = node_get(qrtr_ns.local_node);
if (!node)
return 0;
/* Announce the list of servers registered in this node */
xa_for_each(&node->servers, index, srv) {
ret = service_announce_new(sq, srv);
if (ret < 0) {
pr_err("failed to announce new service\n");
return ret;
}
}
return 0;
}
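/* Register a server for @service/@instance on @node_id:@port, replacing and
 * freeing any server previously registered on the same port.
 */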
static struct qrtr_server *server_add(unsigned int service,
unsigned int instance,
unsigned int node_id,
unsigned int port)
{
struct qrtr_server *srv;
struct qrtr_server *old;
struct qrtr_node *node;
if (!service || !port)
return NULL;
srv = kzalloc(sizeof(*srv), GFP_KERNEL);
if (!srv)
return NULL;
srv->service = service;
srv->instance = instance;
srv->node = node_id;
srv->port = port;
node = node_get(node_id);
if (!node)
goto err;
/* Delete the old server on the same port */
old = xa_store(&node->servers, port, srv, GFP_KERNEL);
if (old) {
if (xa_is_err(old)) {
pr_err("failed to add server [0x%x:0x%x] ret:%d\n",
srv->service, srv->instance, xa_err(old));
goto err;
} else {
kfree(old);
}
}
trace_qrtr_ns_server_add(srv->service, srv->instance,
srv->node, srv->port);
return srv;
err:
kfree(srv);
return NULL;
}
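/* Remove the server registered on @port of @node, broadcast DEL_SERVER for
 * local servers when @bcast is set and notify any matching lookup clients.
 */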
static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)
{
struct qrtr_lookup *lookup;
struct qrtr_server *srv;
struct list_head *li;
srv = xa_load(&node->servers, port);
if (!srv)
return -ENOENT;
xa_erase(&node->servers, port);
/* Broadcast the removal of local servers */
if (srv->node == qrtr_ns.local_node && bcast)
service_announce_del(&qrtr_ns.bcast_sq, srv);
/* Announce the service's disappearance to observers */
list_for_each(li, &qrtr_ns.lookups) {
lookup = container_of(li, struct qrtr_lookup, li);
if (lookup->service && lookup->service != srv->service)
continue;
if (lookup->instance && lookup->instance != srv->instance)
continue;
lookup_notify(&lookup->sq, srv, false);
}
kfree(srv);
return 0;
}
static int say_hello(struct sockaddr_qrtr *dest)
{
struct qrtr_ctrl_pkt pkt;
struct msghdr msg = { };
struct kvec iv;
int ret;
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = cpu_to_le32(QRTR_TYPE_HELLO);
msg.msg_name = (struct sockaddr *)dest;
msg.msg_namelen = sizeof(*dest);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0)
pr_err("failed to send hello msg\n");
return ret;
}
/* Announce the list of servers registered on the local node */
static int ctrl_cmd_hello(struct sockaddr_qrtr *sq)
{
int ret;
ret = say_hello(sq);
if (ret < 0)
return ret;
return announce_servers(sq);
}
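/* Handle a BYE from a remote node: drop all servers it had registered and
 * forward the BYE to every server running on the local node.
 */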
static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
{
struct qrtr_node *local_node;
struct qrtr_ctrl_pkt pkt;
struct qrtr_server *srv;
struct sockaddr_qrtr sq;
struct msghdr msg = { };
struct qrtr_node *node;
unsigned long index;
struct kvec iv;
int ret;
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
node = node_get(from->sq_node);
if (!node)
return 0;
	/* Remove all servers registered by the departing node and notify observers */
xa_for_each(&node->servers, index, srv)
server_del(node, srv->port, true);
/* Advertise the removal of this client to all local servers */
local_node = node_get(qrtr_ns.local_node);
if (!local_node)
return 0;
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE);
pkt.client.node = cpu_to_le32(from->sq_node);
xa_for_each(&local_node->servers, index, srv) {
sq.sq_family = AF_QIPCRTR;
sq.sq_node = srv->node;
sq.sq_port = srv->port;
msg.msg_name = (struct sockaddr *)&sq;
msg.msg_namelen = sizeof(sq);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0) {
pr_err("failed to send bye cmd\n");
return ret;
}
}
return 0;
}
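/* Handle DEL_CLIENT: validate the source, drop any lookups and the server
 * owned by the departing port, then forward the notification to all local
 * servers.
 */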
static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
unsigned int node_id, unsigned int port)
{
struct qrtr_node *local_node;
struct qrtr_lookup *lookup;
struct qrtr_ctrl_pkt pkt;
struct msghdr msg = { };
struct qrtr_server *srv;
struct sockaddr_qrtr sq;
struct qrtr_node *node;
struct list_head *tmp;
struct list_head *li;
unsigned long index;
struct kvec iv;
int ret;
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
/* Don't accept spoofed messages */
if (from->sq_node != node_id)
return -EINVAL;
	/* Local DEL_CLIENT messages come from the port being closed */
if (from->sq_node == qrtr_ns.local_node && from->sq_port != port)
return -EINVAL;
/* Remove any lookups by this client */
list_for_each_safe(li, tmp, &qrtr_ns.lookups) {
lookup = container_of(li, struct qrtr_lookup, li);
if (lookup->sq.sq_node != node_id)
continue;
if (lookup->sq.sq_port != port)
continue;
list_del(&lookup->li);
kfree(lookup);
}
/* Remove the server belonging to this port but don't broadcast
* DEL_SERVER. Neighbours would've already removed the server belonging
* to this port due to the DEL_CLIENT broadcast from qrtr_port_remove().
*/
node = node_get(node_id);
if (node)
server_del(node, port, false);
/* Advertise the removal of this client to all local servers */
local_node = node_get(qrtr_ns.local_node);
if (!local_node)
return 0;
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
pkt.client.node = cpu_to_le32(node_id);
pkt.client.port = cpu_to_le32(port);
xa_for_each(&local_node->servers, index, srv) {
sq.sq_family = AF_QIPCRTR;
sq.sq_node = srv->node;
sq.sq_port = srv->port;
msg.msg_name = (struct sockaddr *)&sq;
msg.msg_namelen = sizeof(sq);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0) {
pr_err("failed to send del client cmd\n");
return ret;
}
}
return 0;
}
static int ctrl_cmd_new_server(struct sockaddr_qrtr *from,
unsigned int service, unsigned int instance,
unsigned int node_id, unsigned int port)
{
struct qrtr_lookup *lookup;
struct qrtr_server *srv;
struct list_head *li;
int ret = 0;
/* Ignore specified node and port for local servers */
if (from->sq_node == qrtr_ns.local_node) {
node_id = from->sq_node;
port = from->sq_port;
}
srv = server_add(service, instance, node_id, port);
if (!srv)
return -EINVAL;
if (srv->node == qrtr_ns.local_node) {
ret = service_announce_new(&qrtr_ns.bcast_sq, srv);
if (ret < 0) {
pr_err("failed to announce new service\n");
return ret;
}
}
/* Notify any potential lookups about the new server */
list_for_each(li, &qrtr_ns.lookups) {
lookup = container_of(li, struct qrtr_lookup, li);
if (lookup->service && lookup->service != service)
continue;
if (lookup->instance && lookup->instance != instance)
continue;
lookup_notify(&lookup->sq, srv, true);
}
return ret;
}
static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
unsigned int service, unsigned int instance,
unsigned int node_id, unsigned int port)
{
struct qrtr_node *node;
	/* Ignore specified node and port for local servers */
if (from->sq_node == qrtr_ns.local_node) {
node_id = from->sq_node;
port = from->sq_port;
}
/* Local servers may only unregister themselves */
if (from->sq_node == qrtr_ns.local_node && from->sq_port != port)
return -EINVAL;
node = node_get(node_id);
if (!node)
return -ENOENT;
return server_del(node, port, true);
}
static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
unsigned int service, unsigned int instance)
{
struct qrtr_server_filter filter;
struct qrtr_lookup *lookup;
struct qrtr_server *srv;
struct qrtr_node *node;
unsigned long node_idx;
unsigned long srv_idx;
/* Accept only local observers */
if (from->sq_node != qrtr_ns.local_node)
return -EINVAL;
lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
if (!lookup)
return -ENOMEM;
lookup->sq = *from;
lookup->service = service;
lookup->instance = instance;
list_add_tail(&lookup->li, &qrtr_ns.lookups);
memset(&filter, 0, sizeof(filter));
filter.service = service;
filter.instance = instance;
xa_for_each(&nodes, node_idx, node) {
xa_for_each(&node->servers, srv_idx, srv) {
if (!server_match(srv, &filter))
continue;
lookup_notify(from, srv, true);
}
}
/* Empty notification, to indicate end of listing */
lookup_notify(from, NULL, true);
return 0;
}
static void ctrl_cmd_del_lookup(struct sockaddr_qrtr *from,
unsigned int service, unsigned int instance)
{
struct qrtr_lookup *lookup;
struct list_head *tmp;
struct list_head *li;
list_for_each_safe(li, tmp, &qrtr_ns.lookups) {
lookup = container_of(li, struct qrtr_lookup, li);
if (lookup->sq.sq_node != from->sq_node)
continue;
if (lookup->sq.sq_port != from->sq_port)
continue;
if (lookup->service != service)
continue;
if (lookup->instance && lookup->instance != instance)
continue;
list_del(&lookup->li);
kfree(lookup);
}
}
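/* Work function draining the control socket: decode each control packet and
 * dispatch it to the corresponding ctrl_cmd_*() handler.
 */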
static void qrtr_ns_worker(struct work_struct *work)
{
const struct qrtr_ctrl_pkt *pkt;
size_t recv_buf_size = 4096;
struct sockaddr_qrtr sq;
struct msghdr msg = { };
unsigned int cmd;
ssize_t msglen;
void *recv_buf;
struct kvec iv;
int ret;
msg.msg_name = (struct sockaddr *)&sq;
msg.msg_namelen = sizeof(sq);
recv_buf = kzalloc(recv_buf_size, GFP_KERNEL);
if (!recv_buf)
return;
for (;;) {
iv.iov_base = recv_buf;
iv.iov_len = recv_buf_size;
msglen = kernel_recvmsg(qrtr_ns.sock, &msg, &iv, 1,
iv.iov_len, MSG_DONTWAIT);
if (msglen == -EAGAIN)
break;
if (msglen < 0) {
pr_err("error receiving packet: %zd\n", msglen);
break;
}
pkt = recv_buf;
cmd = le32_to_cpu(pkt->cmd);
if (cmd < ARRAY_SIZE(qrtr_ctrl_pkt_strings) &&
qrtr_ctrl_pkt_strings[cmd])
trace_qrtr_ns_message(qrtr_ctrl_pkt_strings[cmd],
sq.sq_node, sq.sq_port);
ret = 0;
switch (cmd) {
case QRTR_TYPE_HELLO:
ret = ctrl_cmd_hello(&sq);
break;
case QRTR_TYPE_BYE:
ret = ctrl_cmd_bye(&sq);
break;
case QRTR_TYPE_DEL_CLIENT:
ret = ctrl_cmd_del_client(&sq,
le32_to_cpu(pkt->client.node),
le32_to_cpu(pkt->client.port));
break;
case QRTR_TYPE_NEW_SERVER:
ret = ctrl_cmd_new_server(&sq,
le32_to_cpu(pkt->server.service),
le32_to_cpu(pkt->server.instance),
le32_to_cpu(pkt->server.node),
le32_to_cpu(pkt->server.port));
break;
case QRTR_TYPE_DEL_SERVER:
ret = ctrl_cmd_del_server(&sq,
le32_to_cpu(pkt->server.service),
le32_to_cpu(pkt->server.instance),
le32_to_cpu(pkt->server.node),
le32_to_cpu(pkt->server.port));
break;
case QRTR_TYPE_EXIT:
case QRTR_TYPE_PING:
case QRTR_TYPE_RESUME_TX:
break;
case QRTR_TYPE_NEW_LOOKUP:
ret = ctrl_cmd_new_lookup(&sq,
le32_to_cpu(pkt->server.service),
le32_to_cpu(pkt->server.instance));
break;
case QRTR_TYPE_DEL_LOOKUP:
ctrl_cmd_del_lookup(&sq,
le32_to_cpu(pkt->server.service),
le32_to_cpu(pkt->server.instance));
break;
}
if (ret < 0)
pr_err("failed while handling packet from %d:%d",
sq.sq_node, sq.sq_port);
}
kfree(recv_buf);
}
static void qrtr_ns_data_ready(struct sock *sk)
{
trace_sk_data_ready(sk);
queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
}
int qrtr_ns_init(void)
{
struct sockaddr_qrtr sq;
int ret;
INIT_LIST_HEAD(&qrtr_ns.lookups);
INIT_WORK(&qrtr_ns.work, qrtr_ns_worker);
ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
PF_QIPCRTR, &qrtr_ns.sock);
if (ret < 0)
return ret;
ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq);
if (ret < 0) {
pr_err("failed to get socket name\n");
goto err_sock;
}
qrtr_ns.workqueue = alloc_ordered_workqueue("qrtr_ns_handler", 0);
if (!qrtr_ns.workqueue) {
ret = -ENOMEM;
goto err_sock;
}
qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
sq.sq_port = QRTR_PORT_CTRL;
qrtr_ns.local_node = sq.sq_node;
ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq));
if (ret < 0) {
pr_err("failed to bind to socket\n");
goto err_wq;
}
qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR;
qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST;
qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL;
ret = say_hello(&qrtr_ns.bcast_sq);
if (ret < 0)
goto err_wq;
return 0;
err_wq:
destroy_workqueue(qrtr_ns.workqueue);
err_sock:
sock_release(qrtr_ns.sock);
return ret;
}
EXPORT_SYMBOL_GPL(qrtr_ns_init);
void qrtr_ns_remove(void)
{
cancel_work_sync(&qrtr_ns.work);
destroy_workqueue(qrtr_ns.workqueue);
sock_release(qrtr_ns.sock);
}
EXPORT_SYMBOL_GPL(qrtr_ns_remove);
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_DESCRIPTION("Qualcomm IPC Router Nameservice");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | net/qrtr/ns.c |
/*
* Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
* Copyright (c) 2016-2017, Dave Watson <[email protected]>. All rights reserved.
* Copyright (c) 2016-2017, Lance Chao <[email protected]>. All rights reserved.
* Copyright (c) 2016, Fridolin Pokorny <[email protected]>. All rights reserved.
* Copyright (c) 2016, Nikos Mavrogiannopoulos <[email protected]>. All rights reserved.
* Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>
#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>
#include "tls.h"
struct tls_decrypt_arg {
struct_group(inargs,
bool zc;
bool async;
u8 tail;
);
struct sk_buff *skb;
};
struct tls_decrypt_ctx {
struct sock *sk;
u8 iv[MAX_IV_SIZE];
u8 aad[TLS_MAX_AAD_SIZE];
u8 tail;
struct scatterlist sg[];
};
noinline void tls_err_abort(struct sock *sk, int err)
{
WARN_ON_ONCE(err >= 0);
/* sk->sk_err should contain a positive error code. */
WRITE_ONCE(sk->sk_err, -err);
/* Paired with smp_rmb() in tcp_poll() */
smp_wmb();
sk_error_report(sk);
}
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
unsigned int recursion_level)
{
int start = skb_headlen(skb);
int i, chunk = start - offset;
struct sk_buff *frag_iter;
int elt = 0;
if (unlikely(recursion_level >= 24))
return -EMSGSIZE;
if (chunk > 0) {
if (chunk > len)
chunk = len;
elt++;
len -= chunk;
if (len == 0)
return elt;
offset += chunk;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
WARN_ON(start > offset + len);
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
chunk = end - offset;
if (chunk > 0) {
if (chunk > len)
chunk = len;
elt++;
len -= chunk;
if (len == 0)
return elt;
offset += chunk;
}
start = end;
}
if (unlikely(skb_has_frag_list(skb))) {
skb_walk_frags(skb, frag_iter) {
int end, ret;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
chunk = end - offset;
if (chunk > 0) {
if (chunk > len)
chunk = len;
ret = __skb_nsg(frag_iter, offset - start, chunk,
recursion_level + 1);
if (unlikely(ret < 0))
return ret;
elt += ret;
len -= chunk;
if (len == 0)
return elt;
offset += chunk;
}
start = end;
}
}
BUG_ON(len);
return elt;
}
/* Return the number of scatterlist elements required to completely map the
* skb, or -EMSGSIZE if the recursion depth is exceeded.
*/
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
return __skb_nsg(skb, offset, len, 0);
}
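/* For TLS 1.3, scan backwards from the end of the record to find the inner
 * content type, record it in the tls_msg and return the number of zero
 * padding bytes that follow it.
 */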
static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
struct tls_decrypt_arg *darg)
{
struct strp_msg *rxm = strp_msg(skb);
struct tls_msg *tlm = tls_msg(skb);
int sub = 0;
/* Determine zero-padding length */
if (prot->version == TLS_1_3_VERSION) {
int offset = rxm->full_len - TLS_TAG_SIZE - 1;
char content_type = darg->zc ? darg->tail : 0;
int err;
while (content_type == 0) {
if (offset < prot->prepend_size)
return -EBADMSG;
err = skb_copy_bits(skb, rxm->offset + offset,
&content_type, 1);
if (err)
return err;
if (content_type)
break;
sub++;
offset--;
}
tlm->control = content_type;
}
return sub;
}
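/* Completion callback for async decryption: propagate errors, release the
 * destination pages of out-of-place requests and wake up any waiter once the
 * last pending decrypt completes.
 */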
static void tls_decrypt_done(void *data, int err)
{
struct aead_request *aead_req = data;
struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
struct scatterlist *sgout = aead_req->dst;
struct scatterlist *sgin = aead_req->src;
struct tls_sw_context_rx *ctx;
struct tls_decrypt_ctx *dctx;
struct tls_context *tls_ctx;
struct scatterlist *sg;
unsigned int pages;
struct sock *sk;
int aead_size;
aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
aead_size = ALIGN(aead_size, __alignof__(*dctx));
dctx = (void *)((u8 *)aead_req + aead_size);
sk = dctx->sk;
tls_ctx = tls_get_ctx(sk);
ctx = tls_sw_ctx_rx(tls_ctx);
/* Propagate if there was an err */
if (err) {
if (err == -EBADMSG)
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
ctx->async_wait.err = err;
tls_err_abort(sk, err);
}
	/* Free the destination pages if skb was not decrypted in place */
if (sgout != sgin) {
/* Skip the first S/G entry as it points to AAD */
for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
if (!sg)
break;
put_page(sg_page(sg));
}
}
kfree(aead_req);
spin_lock_bh(&ctx->decrypt_compl_lock);
if (!atomic_dec_return(&ctx->decrypt_pending))
complete(&ctx->async_wait.completion);
spin_unlock_bh(&ctx->decrypt_compl_lock);
}
static int tls_do_decryption(struct sock *sk,
struct scatterlist *sgin,
struct scatterlist *sgout,
char *iv_recv,
size_t data_len,
struct aead_request *aead_req,
struct tls_decrypt_arg *darg)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
int ret;
aead_request_set_tfm(aead_req, ctx->aead_recv);
aead_request_set_ad(aead_req, prot->aad_size);
aead_request_set_crypt(aead_req, sgin, sgout,
data_len + prot->tag_size,
(u8 *)iv_recv);
if (darg->async) {
aead_request_set_callback(aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
tls_decrypt_done, aead_req);
atomic_inc(&ctx->decrypt_pending);
} else {
aead_request_set_callback(aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->async_wait);
}
ret = crypto_aead_decrypt(aead_req);
if (ret == -EINPROGRESS) {
if (darg->async)
return 0;
ret = crypto_wait_req(ret, &ctx->async_wait);
}
darg->async = false;
return ret;
}
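/* Trim both the plaintext and the encrypted message of the currently open
 * record to @target_size (plus crypto overhead on the encrypted side).
 */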
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec;
sk_msg_trim(sk, &rec->msg_plaintext, target_size);
if (target_size > 0)
target_size += prot->overhead_size;
sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}
static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec;
struct sk_msg *msg_en = &rec->msg_encrypted;
return sk_msg_alloc(sk, msg_en, len, 0);
}
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec;
struct sk_msg *msg_pl = &rec->msg_plaintext;
struct sk_msg *msg_en = &rec->msg_encrypted;
int skip, len;
	/* We add page references worth len bytes from the encrypted sg
	 * at the end of the plaintext sg. It is guaranteed that msg_en
	 * has the required room (ensured by the caller).
	 */
len = required - msg_pl->sg.size;
/* Skip initial bytes in msg_en's data to be able to use
* same offset of both plain and encrypted data.
*/
skip = prot->prepend_size + msg_pl->sg.size;
return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
static struct tls_rec *tls_get_rec(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct sk_msg *msg_pl, *msg_en;
struct tls_rec *rec;
int mem_size;
mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
rec = kzalloc(mem_size, sk->sk_allocation);
if (!rec)
return NULL;
msg_pl = &rec->msg_plaintext;
msg_en = &rec->msg_encrypted;
sk_msg_init(msg_pl);
sk_msg_init(msg_en);
sg_init_table(rec->sg_aead_in, 2);
sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
sg_unmark_end(&rec->sg_aead_in[1]);
sg_init_table(rec->sg_aead_out, 2);
sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
sg_unmark_end(&rec->sg_aead_out[1]);
rec->sk = sk;
return rec;
}
static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
sk_msg_free(sk, &rec->msg_encrypted);
sk_msg_free(sk, &rec->msg_plaintext);
kfree(rec);
}
static void tls_free_open_rec(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec;
if (rec) {
tls_free_rec(sk, rec);
ctx->open_rec = NULL;
}
}
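/* Push out any partially sent record, then transmit every record on tx_list
 * that has been marked ready by the encryption callback.
 */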
int tls_tx_records(struct sock *sk, int flags)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec, *tmp;
struct sk_msg *msg_en;
int tx_flags, rc = 0;
if (tls_is_partially_sent_record(tls_ctx)) {
rec = list_first_entry(&ctx->tx_list,
struct tls_rec, list);
if (flags == -1)
tx_flags = rec->tx_flags;
else
tx_flags = flags;
rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
if (rc)
goto tx_err;
/* Full record has been transmitted.
* Remove the head of tx_list
*/
list_del(&rec->list);
sk_msg_free(sk, &rec->msg_plaintext);
kfree(rec);
}
/* Tx all ready records */
list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
if (READ_ONCE(rec->tx_ready)) {
if (flags == -1)
tx_flags = rec->tx_flags;
else
tx_flags = flags;
msg_en = &rec->msg_encrypted;
rc = tls_push_sg(sk, tls_ctx,
&msg_en->sg.data[msg_en->sg.curr],
0, tx_flags);
if (rc)
goto tx_err;
list_del(&rec->list);
sk_msg_free(sk, &rec->msg_plaintext);
kfree(rec);
} else {
break;
}
}
tx_err:
if (rc < 0 && rc != -EAGAIN)
tls_err_abort(sk, -EBADMSG);
return rc;
}
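/* Completion callback for async record encryption: re-extend the sg entry to
 * cover the TLS header again, mark the record ready for transmission and
 * schedule the tx work if it sits at the head of tx_list.
 */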
static void tls_encrypt_done(void *data, int err)
{
struct tls_sw_context_tx *ctx;
struct tls_context *tls_ctx;
struct tls_prot_info *prot;
struct tls_rec *rec = data;
struct scatterlist *sge;
struct sk_msg *msg_en;
bool ready = false;
struct sock *sk;
int pending;
msg_en = &rec->msg_encrypted;
sk = rec->sk;
tls_ctx = tls_get_ctx(sk);
prot = &tls_ctx->prot_info;
ctx = tls_sw_ctx_tx(tls_ctx);
sge = sk_msg_elem(msg_en, msg_en->sg.curr);
sge->offset -= prot->prepend_size;
sge->length += prot->prepend_size;
	/* Check if an error was previously set on the socket */
if (err || sk->sk_err) {
rec = NULL;
/* If err is already set on socket, return the same code */
if (sk->sk_err) {
ctx->async_wait.err = -sk->sk_err;
} else {
ctx->async_wait.err = err;
tls_err_abort(sk, err);
}
}
if (rec) {
struct tls_rec *first_rec;
/* Mark the record as ready for transmission */
smp_store_mb(rec->tx_ready, true);
/* If received record is at head of tx_list, schedule tx */
first_rec = list_first_entry(&ctx->tx_list,
struct tls_rec, list);
if (rec == first_rec)
ready = true;
}
spin_lock_bh(&ctx->encrypt_compl_lock);
pending = atomic_dec_return(&ctx->encrypt_pending);
if (!pending && ctx->async_notify)
complete(&ctx->async_wait.completion);
spin_unlock_bh(&ctx->encrypt_compl_lock);
if (!ready)
return;
/* Schedule the transmission */
if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
schedule_delayed_work(&ctx->tx_work.work, 1);
}
static int tls_do_encryption(struct sock *sk,
struct tls_context *tls_ctx,
struct tls_sw_context_tx *ctx,
struct aead_request *aead_req,
size_t data_len, u32 start)
{
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_rec *rec = ctx->open_rec;
struct sk_msg *msg_en = &rec->msg_encrypted;
struct scatterlist *sge = sk_msg_elem(msg_en, start);
int rc, iv_offset = 0;
/* For CCM based ciphers, first byte of IV is a constant */
switch (prot->cipher_type) {
case TLS_CIPHER_AES_CCM_128:
rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
break;
case TLS_CIPHER_SM4_CCM:
rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
iv_offset = 1;
break;
}
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
prot->iv_size + prot->salt_size);
tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
tls_ctx->tx.rec_seq);
sge->offset += prot->prepend_size;
sge->length -= prot->prepend_size;
msg_en->sg.curr = start;
aead_request_set_tfm(aead_req, ctx->aead_send);
aead_request_set_ad(aead_req, prot->aad_size);
aead_request_set_crypt(aead_req, rec->sg_aead_in,
rec->sg_aead_out,
data_len, rec->iv_data);
aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tls_encrypt_done, rec);
/* Add the record in tx_list */
list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
atomic_inc(&ctx->encrypt_pending);
rc = crypto_aead_encrypt(aead_req);
if (!rc || rc != -EINPROGRESS) {
atomic_dec(&ctx->encrypt_pending);
sge->offset -= prot->prepend_size;
sge->length += prot->prepend_size;
}
if (!rc) {
WRITE_ONCE(rec->tx_ready, true);
} else if (rc != -EINPROGRESS) {
list_del(&rec->list);
return rc;
}
	/* Unhook the record from the context if encryption did not fail */
ctx->open_rec = NULL;
tls_advance_record_sn(sk, prot, &tls_ctx->tx);
return rc;
}
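/* Split the open record @from, moving the tail of its plaintext into a newly
 * allocated record returned via @to; @orig_end saves the original sg end so
 * the split can be undone by tls_merge_open_record().
 */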
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
struct tls_rec **to, struct sk_msg *msg_opl,
struct sk_msg *msg_oen, u32 split_point,
u32 tx_overhead_size, u32 *orig_end)
{
u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
struct scatterlist *sge, *osge, *nsge;
u32 orig_size = msg_opl->sg.size;
struct scatterlist tmp = { };
struct sk_msg *msg_npl;
struct tls_rec *new;
int ret;
new = tls_get_rec(sk);
if (!new)
return -ENOMEM;
ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
tx_overhead_size, 0);
if (ret < 0) {
tls_free_rec(sk, new);
return ret;
}
*orig_end = msg_opl->sg.end;
i = msg_opl->sg.start;
sge = sk_msg_elem(msg_opl, i);
while (apply && sge->length) {
if (sge->length > apply) {
u32 len = sge->length - apply;
get_page(sg_page(sge));
sg_set_page(&tmp, sg_page(sge), len,
sge->offset + apply);
sge->length = apply;
bytes += apply;
apply = 0;
} else {
apply -= sge->length;
bytes += sge->length;
}
sk_msg_iter_var_next(i);
if (i == msg_opl->sg.end)
break;
sge = sk_msg_elem(msg_opl, i);
}
msg_opl->sg.end = i;
msg_opl->sg.curr = i;
msg_opl->sg.copybreak = 0;
msg_opl->apply_bytes = 0;
msg_opl->sg.size = bytes;
msg_npl = &new->msg_plaintext;
msg_npl->apply_bytes = apply;
msg_npl->sg.size = orig_size - bytes;
j = msg_npl->sg.start;
nsge = sk_msg_elem(msg_npl, j);
if (tmp.length) {
memcpy(nsge, &tmp, sizeof(*nsge));
sk_msg_iter_var_next(j);
nsge = sk_msg_elem(msg_npl, j);
}
osge = sk_msg_elem(msg_opl, i);
while (osge->length) {
memcpy(nsge, osge, sizeof(*nsge));
sg_unmark_end(nsge);
sk_msg_iter_var_next(i);
sk_msg_iter_var_next(j);
if (i == *orig_end)
break;
osge = sk_msg_elem(msg_opl, i);
nsge = sk_msg_elem(msg_npl, j);
}
msg_npl->sg.end = j;
msg_npl->sg.curr = j;
msg_npl->sg.copybreak = 0;
*to = new;
return 0;
}
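/* Undo a record split: fold the plaintext of @from back into @to, restore the
 * original sg end and take over @from's encrypted buffer.
 */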
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
struct tls_rec *from, u32 orig_end)
{
struct sk_msg *msg_npl = &from->msg_plaintext;
struct sk_msg *msg_opl = &to->msg_plaintext;
struct scatterlist *osge, *nsge;
u32 i, j;
i = msg_opl->sg.end;
sk_msg_iter_var_prev(i);
j = msg_npl->sg.start;
osge = sk_msg_elem(msg_opl, i);
nsge = sk_msg_elem(msg_npl, j);
if (sg_page(osge) == sg_page(nsge) &&
osge->offset + osge->length == nsge->offset) {
osge->length += nsge->length;
put_page(sg_page(nsge));
}
msg_opl->sg.end = orig_end;
msg_opl->sg.curr = orig_end;
msg_opl->sg.copybreak = 0;
msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
msg_opl->sg.size += msg_npl->sg.size;
sk_msg_free(sk, &to->msg_encrypted);
sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
kfree(from);
}
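/* Close and encrypt the currently open record: split it if needed, set up the
 * AAD and TLS header, kick off (possibly async) encryption and transmit any
 * completed records.
 */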
static int tls_push_record(struct sock *sk, int flags,
unsigned char record_type)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
u32 i, split_point, orig_end;
struct sk_msg *msg_pl, *msg_en;
struct aead_request *req;
bool split;
int rc;
if (!rec)
return 0;
msg_pl = &rec->msg_plaintext;
msg_en = &rec->msg_encrypted;
split_point = msg_pl->apply_bytes;
split = split_point && split_point < msg_pl->sg.size;
if (unlikely((!split &&
msg_pl->sg.size +
prot->overhead_size > msg_en->sg.size) ||
(split &&
split_point +
prot->overhead_size > msg_en->sg.size))) {
split = true;
split_point = msg_en->sg.size;
}
if (split) {
rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
split_point, prot->overhead_size,
&orig_end);
if (rc < 0)
return rc;
		/* This can happen if the tls_split_open_record() call above
		 * allocates a single large encryption buffer instead of two
		 * smaller ones. In that case adjust the pointers and continue
		 * without splitting.
		 */
if (!msg_pl->sg.size) {
tls_merge_open_record(sk, rec, tmp, orig_end);
msg_pl = &rec->msg_plaintext;
msg_en = &rec->msg_encrypted;
split = false;
}
sk_msg_trim(sk, msg_en, msg_pl->sg.size +
prot->overhead_size);
}
rec->tx_flags = flags;
req = &rec->aead_req;
i = msg_pl->sg.end;
sk_msg_iter_var_prev(i);
rec->content_type = record_type;
if (prot->version == TLS_1_3_VERSION) {
/* Add content type to end of message. No padding added */
sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
sg_mark_end(&rec->sg_content_type);
sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
&rec->sg_content_type);
} else {
sg_mark_end(sk_msg_elem(msg_pl, i));
}
if (msg_pl->sg.end < msg_pl->sg.start) {
sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
MAX_SKB_FRAGS - msg_pl->sg.start + 1,
msg_pl->sg.data);
}
i = msg_pl->sg.start;
sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
i = msg_en->sg.end;
sk_msg_iter_var_prev(i);
sg_mark_end(sk_msg_elem(msg_en, i));
i = msg_en->sg.start;
sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
tls_ctx->tx.rec_seq, record_type, prot);
tls_fill_prepend(tls_ctx,
page_address(sg_page(&msg_en->sg.data[i])) +
msg_en->sg.data[i].offset,
msg_pl->sg.size + prot->tail_size,
record_type);
tls_ctx->pending_open_record_frags = false;
rc = tls_do_encryption(sk, tls_ctx, ctx, req,
msg_pl->sg.size + prot->tail_size, i);
if (rc < 0) {
if (rc != -EINPROGRESS) {
tls_err_abort(sk, -EBADMSG);
if (split) {
tls_ctx->pending_open_record_frags = true;
tls_merge_open_record(sk, rec, tmp, orig_end);
}
}
ctx->async_capable = 1;
return rc;
} else if (split) {
msg_pl = &tmp->msg_plaintext;
msg_en = &tmp->msg_encrypted;
sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
tls_ctx->pending_open_record_frags = true;
ctx->open_rec = tmp;
}
return tls_tx_records(sk, flags);
}
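/* Consult the attached psock's BPF verdict, if any, and either push the
 * record to the TLS transmit path, redirect it to another socket or drop it.
 */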
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
bool full_record, u8 record_type,
ssize_t *copied, int flags)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct sk_msg msg_redir = { };
struct sk_psock *psock;
struct sock *sk_redir;
struct tls_rec *rec;
bool enospc, policy, redir_ingress;
int err = 0, send;
u32 delta = 0;
policy = !(flags & MSG_SENDPAGE_NOPOLICY);
psock = sk_psock_get(sk);
if (!psock || !policy) {
err = tls_push_record(sk, flags, record_type);
if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
}
if (psock)
sk_psock_put(sk, psock);
return err;
}
more_data:
enospc = sk_msg_full(msg);
if (psock->eval == __SK_NONE) {
delta = msg->sg.size;
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
delta -= msg->sg.size;
}
if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
!enospc && !full_record) {
err = -ENOSPC;
goto out_err;
}
msg->cork_bytes = 0;
send = msg->sg.size;
if (msg->apply_bytes && msg->apply_bytes < send)
send = msg->apply_bytes;
switch (psock->eval) {
case __SK_PASS:
err = tls_push_record(sk, flags, record_type);
if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
goto out_err;
}
break;
case __SK_REDIRECT:
redir_ingress = psock->redir_ingress;
sk_redir = psock->sk_redir;
memcpy(&msg_redir, msg, sizeof(*msg));
if (msg->apply_bytes < send)
msg->apply_bytes = 0;
else
msg->apply_bytes -= send;
sk_msg_return_zero(sk, msg, send);
msg->sg.size -= send;
release_sock(sk);
err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
&msg_redir, send, flags);
lock_sock(sk);
if (err < 0) {
*copied -= sk_msg_free_nocharge(sk, &msg_redir);
msg->sg.size = 0;
}
if (msg->sg.size == 0)
tls_free_open_rec(sk);
break;
case __SK_DROP:
default:
sk_msg_free_partial(sk, msg, send);
if (msg->apply_bytes < send)
msg->apply_bytes = 0;
else
msg->apply_bytes -= send;
if (msg->sg.size == 0)
tls_free_open_rec(sk);
*copied -= (send + delta);
err = -EACCES;
}
if (likely(!err)) {
bool reset_eval = !ctx->open_rec;
rec = ctx->open_rec;
if (rec) {
msg = &rec->msg_plaintext;
if (!msg->apply_bytes)
reset_eval = true;
}
if (reset_eval) {
psock->eval = __SK_NONE;
if (psock->sk_redir) {
sock_put(psock->sk_redir);
psock->sk_redir = NULL;
}
}
if (rec)
goto more_data;
}
out_err:
sk_psock_put(sk, psock);
return err;
}
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec;
struct sk_msg *msg_pl;
size_t copied;
if (!rec)
return 0;
msg_pl = &rec->msg_plaintext;
copied = msg_pl->sg.size;
if (!copied)
return 0;
return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
&copied, flags);
}
static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
struct sk_msg *msg_pl, size_t try_to_copy,
ssize_t *copied)
{
struct page *page = NULL, **pages = &page;
do {
ssize_t part;
size_t off;
part = iov_iter_extract_pages(&msg->msg_iter, &pages,
try_to_copy, 1, 0, &off);
if (part <= 0)
return part ?: -EIO;
if (WARN_ON_ONCE(!sendpage_ok(page))) {
iov_iter_revert(&msg->msg_iter, part);
return -EIO;
}
sk_msg_page_add(msg_pl, page, part, off);
sk_mem_charge(sk, part);
*copied += part;
try_to_copy -= part;
} while (try_to_copy && !sk_msg_full(msg_pl));
return 0;
}
static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
size_t size)
{
long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
bool async_capable = ctx->async_capable;
unsigned char record_type = TLS_RECORD_TYPE_DATA;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool eor = !(msg->msg_flags & MSG_MORE);
size_t try_to_copy;
ssize_t copied = 0;
struct sk_msg *msg_pl, *msg_en;
struct tls_rec *rec;
int required_size;
int num_async = 0;
bool full_record;
int record_room;
int num_zc = 0;
int orig_size;
int ret = 0;
int pending;
if (!eor && (msg->msg_flags & MSG_EOR))
return -EINVAL;
if (unlikely(msg->msg_controllen)) {
ret = tls_process_cmsg(sk, msg, &record_type);
if (ret) {
if (ret == -EINPROGRESS)
num_async++;
else if (ret != -EAGAIN)
goto send_end;
}
}
while (msg_data_left(msg)) {
if (sk->sk_err) {
ret = -sk->sk_err;
goto send_end;
}
if (ctx->open_rec)
rec = ctx->open_rec;
else
rec = ctx->open_rec = tls_get_rec(sk);
if (!rec) {
ret = -ENOMEM;
goto send_end;
}
msg_pl = &rec->msg_plaintext;
msg_en = &rec->msg_encrypted;
orig_size = msg_pl->sg.size;
full_record = false;
try_to_copy = msg_data_left(msg);
record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
if (try_to_copy >= record_room) {
try_to_copy = record_room;
full_record = true;
}
required_size = msg_pl->sg.size + try_to_copy +
prot->overhead_size;
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
alloc_encrypted:
ret = tls_alloc_encrypted_msg(sk, required_size);
if (ret) {
if (ret != -ENOSPC)
goto wait_for_memory;
/* Adjust try_to_copy according to the amount that was
* actually allocated. The difference is due
* to max sg elements limit
*/
try_to_copy -= required_size - msg_en->sg.size;
full_record = true;
}
if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
try_to_copy, &copied);
if (ret < 0)
goto send_end;
tls_ctx->pending_open_record_frags = true;
if (full_record || eor || sk_msg_full(msg_pl))
goto copied;
continue;
}
if (!is_kvec && (full_record || eor) && !async_capable) {
u32 first = msg_pl->sg.end;
ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
msg_pl, try_to_copy);
if (ret)
goto fallback_to_reg_send;
num_zc++;
copied += try_to_copy;
sk_msg_sg_copy_set(msg_pl, first);
ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
record_type, &copied,
msg->msg_flags);
if (ret) {
if (ret == -EINPROGRESS)
num_async++;
else if (ret == -ENOMEM)
goto wait_for_memory;
else if (ctx->open_rec && ret == -ENOSPC)
goto rollback_iter;
else if (ret != -EAGAIN)
goto send_end;
}
continue;
rollback_iter:
copied -= try_to_copy;
sk_msg_sg_copy_clear(msg_pl, first);
iov_iter_revert(&msg->msg_iter,
msg_pl->sg.size - orig_size);
fallback_to_reg_send:
sk_msg_trim(sk, msg_pl, orig_size);
}
required_size = msg_pl->sg.size + try_to_copy;
ret = tls_clone_plaintext_msg(sk, required_size);
if (ret) {
if (ret != -ENOSPC)
goto send_end;
/* Adjust try_to_copy according to the amount that was
* actually allocated. The difference is due
* to max sg elements limit
*/
try_to_copy -= required_size - msg_pl->sg.size;
full_record = true;
sk_msg_trim(sk, msg_en,
msg_pl->sg.size + prot->overhead_size);
}
if (try_to_copy) {
ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
msg_pl, try_to_copy);
if (ret < 0)
goto trim_sgl;
}
		/* Mark the record as having pending open frags only after a
		 * successful copy; otherwise we would trim the sg but never
		 * reset the open record frags.
		 */
tls_ctx->pending_open_record_frags = true;
copied += try_to_copy;
copied:
if (full_record || eor) {
ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
record_type, &copied,
msg->msg_flags);
if (ret) {
if (ret == -EINPROGRESS)
num_async++;
else if (ret == -ENOMEM)
goto wait_for_memory;
else if (ret != -EAGAIN) {
if (ret == -ENOSPC)
ret = 0;
goto send_end;
}
}
}
continue;
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
ret = sk_stream_wait_memory(sk, &timeo);
if (ret) {
trim_sgl:
if (ctx->open_rec)
tls_trim_both_msgs(sk, orig_size);
goto send_end;
}
if (ctx->open_rec && msg_en->sg.size < required_size)
goto alloc_encrypted;
}
if (!num_async) {
goto send_end;
} else if (num_zc) {
/* Wait for pending encryptions to get completed */
spin_lock_bh(&ctx->encrypt_compl_lock);
ctx->async_notify = true;
pending = atomic_read(&ctx->encrypt_pending);
spin_unlock_bh(&ctx->encrypt_compl_lock);
if (pending)
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
else
reinit_completion(&ctx->async_wait.completion);
/* There can be no concurrent accesses, since we have no
* pending encrypt operations
*/
WRITE_ONCE(ctx->async_notify, false);
if (ctx->async_wait.err) {
ret = ctx->async_wait.err;
copied = 0;
}
}
/* Transmit if any encryptions have completed */
if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
cancel_delayed_work(&ctx->tx_work.work);
tls_tx_records(sk, msg->msg_flags);
}
send_end:
ret = sk_stream_error(sk, msg->msg_flags, ret);
return copied > 0 ? copied : ret;
}
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
int ret;
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
MSG_SENDPAGE_NOPOLICY))
return -EOPNOTSUPP;
ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
if (ret)
return ret;
lock_sock(sk);
ret = tls_sw_sendmsg_locked(sk, msg, size);
release_sock(sk);
mutex_unlock(&tls_ctx->tx_lock);
return ret;
}
/*
* Handle unexpected EOF during splice without SPLICE_F_MORE set.
*/
void tls_sw_splice_eof(struct socket *sock)
{
struct sock *sk = sock->sk;
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec;
struct sk_msg *msg_pl;
ssize_t copied = 0;
bool retrying = false;
int ret = 0;
int pending;
if (!ctx->open_rec)
return;
mutex_lock(&tls_ctx->tx_lock);
lock_sock(sk);
retry:
rec = ctx->open_rec;
if (!rec)
goto unlock;
msg_pl = &rec->msg_plaintext;
/* Check the BPF advisor and perform transmission. */
ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
&copied, 0);
switch (ret) {
case 0:
case -EAGAIN:
if (retrying)
goto unlock;
retrying = true;
goto retry;
case -EINPROGRESS:
break;
default:
goto unlock;
}
/* Wait for pending encryptions to get completed */
spin_lock_bh(&ctx->encrypt_compl_lock);
ctx->async_notify = true;
pending = atomic_read(&ctx->encrypt_pending);
spin_unlock_bh(&ctx->encrypt_compl_lock);
if (pending)
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
else
reinit_completion(&ctx->async_wait.completion);
/* There can be no concurrent accesses, since we have no pending
* encrypt operations
*/
WRITE_ONCE(ctx->async_notify, false);
if (ctx->async_wait.err)
goto unlock;
/* Transmit if any encryptions have completed */
if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
cancel_delayed_work(&ctx->tx_work.work);
tls_tx_records(sk, 0);
}
unlock:
release_sock(sk);
mutex_unlock(&tls_ctx->tx_lock);
}
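/* Wait until the stream parser has a complete record ready or the psock queue
 * has data. Returns 1 when a record is available, 0 on shutdown/EOF or
 * pending psock data, and a negative error otherwise.
 */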
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
bool released)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
long timeo;
timeo = sock_rcvtimeo(sk, nonblock);
while (!tls_strp_msg_ready(ctx)) {
if (!sk_psock_queue_empty(psock))
return 0;
if (sk->sk_err)
return sock_error(sk);
if (!skb_queue_empty(&sk->sk_receive_queue)) {
tls_strp_check_rcv(&ctx->strp);
if (tls_strp_msg_ready(ctx))
break;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
if (sock_flag(sk, SOCK_DONE))
return 0;
if (!timeo)
return -EAGAIN;
released = true;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk_wait_event(sk, &timeo,
tls_strp_msg_ready(ctx) ||
!sk_psock_queue_empty(psock),
&wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
/* Handle signals */
if (signal_pending(current))
return sock_intr_errno(timeo);
}
tls_strp_msg_load(&ctx->strp, released);
return 1;
}
static int tls_setup_from_iter(struct iov_iter *from,
int length, int *pages_used,
struct scatterlist *to,
int to_max_pages)
{
int rc = 0, i = 0, num_elem = *pages_used, maxpages;
struct page *pages[MAX_SKB_FRAGS];
unsigned int size = 0;
ssize_t copied, use;
size_t offset;
while (length > 0) {
i = 0;
maxpages = to_max_pages - num_elem;
if (maxpages == 0) {
rc = -EFAULT;
goto out;
}
copied = iov_iter_get_pages2(from, pages,
length,
maxpages, &offset);
if (copied <= 0) {
rc = -EFAULT;
goto out;
}
length -= copied;
size += copied;
while (copied) {
use = min_t(int, copied, PAGE_SIZE - offset);
sg_set_page(&to[num_elem],
pages[i], use, offset);
sg_unmark_end(&to[num_elem]);
/* We do not uncharge memory from this API */
offset = 0;
copied -= use;
i++;
num_elem++;
}
}
/* Mark the end in the last sg entry if newly added */
if (num_elem > *pages_used)
sg_mark_end(&to[num_elem - 1]);
out:
if (rc)
iov_iter_revert(from, size);
*pages_used = num_elem;
return rc;
}
static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
unsigned int full_len)
{
struct strp_msg *clr_rxm;
struct sk_buff *clr_skb;
int err;
clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
&err, sk->sk_allocation);
if (!clr_skb)
return NULL;
skb_copy_header(clr_skb, skb);
clr_skb->len = full_len;
clr_skb->data_len = full_len;
clr_rxm = strp_msg(clr_skb);
clr_rxm->offset = 0;
return clr_skb;
}
/* Decrypt handlers
*
* tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 * zc    | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
*
* If ZC decryption was performed darg.skb will point to the input skb.
*/
/* This function decrypts the input skb into either out_iov or out_sg, or
 * into the skb buffers themselves. The input parameter 'darg->zc' indicates
 * whether zero-copy mode should be attempted. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are NULL,
 * the decryption happens inside the skb buffers themselves, i.e. zero-copy
 * is disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
struct scatterlist *out_sg,
struct tls_decrypt_arg *darg)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
int n_sgin, n_sgout, aead_size, err, pages = 0;
struct sk_buff *skb = tls_strp_msg(ctx);
const struct strp_msg *rxm = strp_msg(skb);
const struct tls_msg *tlm = tls_msg(skb);
struct aead_request *aead_req;
struct scatterlist *sgin = NULL;
struct scatterlist *sgout = NULL;
const int data_len = rxm->full_len - prot->overhead_size;
int tail_pages = !!prot->tail_size;
struct tls_decrypt_ctx *dctx;
struct sk_buff *clear_skb;
int iv_offset = 0;
u8 *mem;
n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
rxm->full_len - prot->prepend_size);
if (n_sgin < 1)
return n_sgin ?: -EBADMSG;
if (darg->zc && (out_iov || out_sg)) {
clear_skb = NULL;
if (out_iov)
n_sgout = 1 + tail_pages +
iov_iter_npages_cap(out_iov, INT_MAX, data_len);
else
n_sgout = sg_nents(out_sg);
} else {
darg->zc = false;
clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
if (!clear_skb)
return -ENOMEM;
n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
}
/* Increment to accommodate AAD */
n_sgin = n_sgin + 1;
/* Allocate a single block of memory which contains
* aead_req || tls_decrypt_ctx.
* Both structs are variable length.
*/
aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
aead_size = ALIGN(aead_size, __alignof__(*dctx));
mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
sk->sk_allocation);
if (!mem) {
err = -ENOMEM;
goto exit_free_skb;
}
/* Segment the allocated memory */
aead_req = (struct aead_request *)mem;
dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
dctx->sk = sk;
sgin = &dctx->sg[0];
sgout = &dctx->sg[n_sgin];
/* For CCM based ciphers, first byte of nonce+iv is a constant */
switch (prot->cipher_type) {
case TLS_CIPHER_AES_CCM_128:
dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
break;
case TLS_CIPHER_SM4_CCM:
dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
iv_offset = 1;
break;
}
/* Prepare IV */
if (prot->version == TLS_1_3_VERSION ||
prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
prot->iv_size + prot->salt_size);
} else {
err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
&dctx->iv[iv_offset] + prot->salt_size,
prot->iv_size);
if (err < 0)
goto exit_free;
memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
}
tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
/* Prepare AAD */
tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
prot->tail_size,
tls_ctx->rx.rec_seq, tlm->control, prot);
/* Prepare sgin */
sg_init_table(sgin, n_sgin);
sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
err = skb_to_sgvec(skb, &sgin[1],
rxm->offset + prot->prepend_size,
rxm->full_len - prot->prepend_size);
if (err < 0)
goto exit_free;
if (clear_skb) {
sg_init_table(sgout, n_sgout);
sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
data_len + prot->tail_size);
if (err < 0)
goto exit_free;
} else if (out_iov) {
sg_init_table(sgout, n_sgout);
sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
(n_sgout - 1 - tail_pages));
if (err < 0)
goto exit_free_pages;
if (prot->tail_size) {
sg_unmark_end(&sgout[pages]);
sg_set_buf(&sgout[pages + 1], &dctx->tail,
prot->tail_size);
sg_mark_end(&sgout[pages + 1]);
}
} else if (out_sg) {
memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
}
/* Prepare and submit AEAD request */
err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
data_len + prot->tail_size, aead_req, darg);
if (err)
goto exit_free_pages;
darg->skb = clear_skb ?: tls_strp_msg(ctx);
clear_skb = NULL;
if (unlikely(darg->async)) {
err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
if (err)
__skb_queue_tail(&ctx->async_hold, darg->skb);
return err;
}
if (prot->tail_size)
darg->tail = dctx->tail;
exit_free_pages:
/* Release the pages in case iov was mapped to pages */
for (; pages > 0; pages--)
put_page(sg_page(&sgout[pages]));
exit_free:
kfree(mem);
exit_free_skb:
consume_skb(clear_skb);
return err;
}
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
struct msghdr *msg, struct tls_decrypt_arg *darg)
{
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct strp_msg *rxm;
int pad, err;
err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
if (err < 0) {
if (err == -EBADMSG)
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
return err;
}
/* keep going even for ->async, the code below is TLS 1.3 */
/* If opportunistic TLS 1.3 ZC failed retry without ZC */
if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
darg->tail != TLS_RECORD_TYPE_DATA)) {
darg->zc = false;
if (!darg->tail)
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
return tls_decrypt_sw(sk, tls_ctx, msg, darg);
}
pad = tls_padding_length(prot, darg->skb, darg);
if (pad < 0) {
if (darg->skb != tls_strp_msg(ctx))
consume_skb(darg->skb);
return pad;
}
rxm = strp_msg(darg->skb);
rxm->full_len -= pad;
return 0;
}
static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct strp_msg *rxm;
int pad, err;
if (tls_ctx->rx_conf != TLS_HW)
return 0;
err = tls_device_decrypted(sk, tls_ctx);
if (err <= 0)
return err;
pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
if (pad < 0)
return pad;
darg->async = false;
darg->skb = tls_strp_msg(ctx);
/* ->zc downgrade check, in case TLS 1.3 gets here */
darg->zc &= !(prot->version == TLS_1_3_VERSION &&
tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);
rxm = strp_msg(darg->skb);
rxm->full_len -= pad;
if (!darg->zc) {
/* Non-ZC case needs a real skb */
darg->skb = tls_strp_msg_detach(ctx);
if (!darg->skb)
return -ENOMEM;
} else {
unsigned int off, len;
		/* In the ZC case the caller does not use the output skb;
		 * copy the decrypted data into the msg here. Note the skb
		 * is not fully trimmed.
		 */
off = rxm->offset + prot->prepend_size;
len = rxm->full_len - prot->overhead_size;
err = skb_copy_datagram_msg(darg->skb, off, msg, len);
if (err)
return err;
}
return 1;
}
static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
struct tls_decrypt_arg *darg)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct strp_msg *rxm;
int err;
err = tls_decrypt_device(sk, msg, tls_ctx, darg);
if (!err)
err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
if (err < 0)
return err;
rxm = strp_msg(darg->skb);
rxm->offset += prot->prepend_size;
rxm->full_len -= prot->overhead_size;
tls_advance_record_sn(sk, prot, &tls_ctx->rx);
return 0;
}
int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
struct tls_decrypt_arg darg = { .zc = true, };
return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
u8 *control)
{
int err;
if (!*control) {
*control = tlm->control;
if (!*control)
return -EBADMSG;
err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
sizeof(*control), control);
if (*control != TLS_RECORD_TYPE_DATA) {
if (err || msg->msg_flags & MSG_CTRUNC)
return -EIO;
}
} else if (*control != tlm->control) {
return 0;
}
return 1;
}
static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
tls_strp_msg_done(&ctx->strp);
}
/* This function traverses the rx_list in the TLS receive context and copies
 * the decrypted records into the buffer provided by the caller when zero-copy
 * is not in use. Further, records are removed from the rx_list if this is not
 * a peek case and the record has been consumed completely.
*/
static int process_rx_list(struct tls_sw_context_rx *ctx,
struct msghdr *msg,
u8 *control,
size_t skip,
size_t len,
bool is_peek)
{
struct sk_buff *skb = skb_peek(&ctx->rx_list);
struct tls_msg *tlm;
ssize_t copied = 0;
int err;
while (skip && skb) {
struct strp_msg *rxm = strp_msg(skb);
tlm = tls_msg(skb);
err = tls_record_content_type(msg, tlm, control);
if (err <= 0)
goto out;
if (skip < rxm->full_len)
break;
skip = skip - rxm->full_len;
skb = skb_peek_next(skb, &ctx->rx_list);
}
while (len && skb) {
struct sk_buff *next_skb;
struct strp_msg *rxm = strp_msg(skb);
int chunk = min_t(unsigned int, rxm->full_len - skip, len);
tlm = tls_msg(skb);
err = tls_record_content_type(msg, tlm, control);
if (err <= 0)
goto out;
err = skb_copy_datagram_msg(skb, rxm->offset + skip,
msg, chunk);
if (err < 0)
goto out;
len = len - chunk;
copied = copied + chunk;
		/* Consume the data from the record in the non-peek case */
if (!is_peek) {
rxm->offset = rxm->offset + chunk;
rxm->full_len = rxm->full_len - chunk;
/* Return if there is unconsumed data in the record */
if (rxm->full_len - skip)
break;
}
/* The remaining skip-bytes must lie in 1st record in rx_list.
* So from the 2nd record, 'skip' should be 0.
*/
skip = 0;
if (msg)
msg->msg_flags |= MSG_EOR;
next_skb = skb_peek_next(skb, &ctx->rx_list);
if (!is_peek) {
__skb_unlink(skb, &ctx->rx_list);
consume_skb(skb);
}
skb = next_skb;
}
err = 0;
out:
return copied ? : err;
}
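/* Periodically flush the socket backlog during large reads so that softirq
 * processing is not starved; returns true if the backlog was processed, which
 * implies the socket lock was released and re-taken.
 */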
static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
size_t len_left, size_t decrypted, ssize_t done,
size_t *flushed_at)
{
size_t max_rec;
if (len_left <= decrypted)
return false;
max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
return false;
*flushed_at = done;
return sk_flush_backlog(sk);
}
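/* Serialize RX readers: wait until no other reader holds the receive context,
 * then mark it as owned by the current reader.
 */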
static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
bool nonblock)
{
long timeo;
timeo = sock_rcvtimeo(sk, nonblock);
while (unlikely(ctx->reader_present)) {
DEFINE_WAIT_FUNC(wait, woken_wake_function);
ctx->reader_contended = 1;
add_wait_queue(&ctx->wq, &wait);
sk_wait_event(sk, &timeo,
!READ_ONCE(ctx->reader_present), &wait);
remove_wait_queue(&ctx->wq, &wait);
if (timeo <= 0)
return -EAGAIN;
if (signal_pending(current))
return sock_intr_errno(timeo);
}
WRITE_ONCE(ctx->reader_present, 1);
return 0;
}
static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
bool nonblock)
{
int err;
lock_sock(sk);
err = tls_rx_reader_acquire(sk, ctx, nonblock);
if (err)
release_sock(sk);
return err;
}
static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
if (unlikely(ctx->reader_contended)) {
if (wq_has_sleeper(&ctx->wq))
wake_up(&ctx->wq);
else
ctx->reader_contended = 0;
WARN_ON_ONCE(!ctx->reader_present);
}
WRITE_ONCE(ctx->reader_present, 0);
}
static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
tls_rx_reader_release(sk, ctx);
release_sock(sk);
}
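/* Main software-TLS receive path: drain already-decrypted records from
 * rx_list, then decrypt records from the stream parser (zero-copy or
 * asynchronously where possible) until the request is satisfied.
 */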
int tls_sw_recvmsg(struct sock *sk,
struct msghdr *msg,
size_t len,
int flags,
int *addr_len)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
ssize_t decrypted = 0, async_copy_bytes = 0;
struct sk_psock *psock;
unsigned char control = 0;
size_t flushed_at = 0;
struct strp_msg *rxm;
struct tls_msg *tlm;
ssize_t copied = 0;
bool async = false;
int target, err;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool is_peek = flags & MSG_PEEK;
bool released = true;
bool bpf_strp_enabled;
bool zc_capable;
if (unlikely(flags & MSG_ERRQUEUE))
return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
psock = sk_psock_get(sk);
err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
if (err < 0)
return err;
bpf_strp_enabled = sk_psock_strp_enabled(psock);
/* If crypto failed the connection is broken */
err = ctx->async_wait.err;
if (err)
goto end;
/* Process pending decrypted records. It must be non-zero-copy */
err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
if (err < 0)
goto end;
copied = err;
if (len <= copied)
goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
len = len - copied;
zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
ctx->zc_capable;
decrypted = 0;
while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
struct tls_decrypt_arg darg;
int to_decrypt, chunk;
err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
released);
if (err <= 0) {
if (psock) {
chunk = sk_msg_recvmsg(sk, psock, msg, len,
flags);
if (chunk > 0) {
decrypted += chunk;
len -= chunk;
continue;
}
}
goto recv_end;
}
memset(&darg.inargs, 0, sizeof(darg.inargs));
rxm = strp_msg(tls_strp_msg(ctx));
tlm = tls_msg(tls_strp_msg(ctx));
to_decrypt = rxm->full_len - prot->overhead_size;
if (zc_capable && to_decrypt <= len &&
tlm->control == TLS_RECORD_TYPE_DATA)
darg.zc = true;
/* Do not use async mode if record is non-data */
if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
darg.async = ctx->async_capable;
else
darg.async = false;
err = tls_rx_one_record(sk, msg, &darg);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto recv_end;
}
async |= darg.async;
/* If the type of records being processed is not known yet,
 * set it to the record type just dequeued. If it is already known
 * but does not match the record type just dequeued, go to end.
 * We always get the record type here since for TLS 1.2 the record
 * type is known just after the record is dequeued from the stream
 * parser. For TLS 1.3, we disable async.
 */
err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
if (err <= 0) {
DEBUG_NET_WARN_ON_ONCE(darg.zc);
tls_rx_rec_done(ctx);
put_on_rx_list_err:
__skb_queue_tail(&ctx->rx_list, darg.skb);
goto recv_end;
}
/* periodically flush backlog, and feed strparser */
released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
decrypted + copied,
&flushed_at);
/* TLS 1.3 may have updated the length by more than overhead */
rxm = strp_msg(darg.skb);
chunk = rxm->full_len;
tls_rx_rec_done(ctx);
if (!darg.zc) {
bool partially_consumed = chunk > len;
struct sk_buff *skb = darg.skb;
DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
if (async) {
/* TLS 1.2-only, to_decrypt must be text len */
chunk = min_t(int, to_decrypt, len);
async_copy_bytes += chunk;
put_on_rx_list:
decrypted += chunk;
len -= chunk;
__skb_queue_tail(&ctx->rx_list, skb);
continue;
}
if (bpf_strp_enabled) {
released = true;
err = sk_psock_tls_strp_read(psock, skb);
if (err != __SK_PASS) {
rxm->offset = rxm->offset + rxm->full_len;
rxm->full_len = 0;
if (err == __SK_DROP)
consume_skb(skb);
continue;
}
}
if (partially_consumed)
chunk = len;
err = skb_copy_datagram_msg(skb, rxm->offset,
msg, chunk);
if (err < 0)
goto put_on_rx_list_err;
if (is_peek)
goto put_on_rx_list;
if (partially_consumed) {
rxm->offset += chunk;
rxm->full_len -= chunk;
goto put_on_rx_list;
}
consume_skb(skb);
}
decrypted += chunk;
len -= chunk;
/* Return full control message to userspace before trying
* to parse another message type
*/
msg->msg_flags |= MSG_EOR;
if (control != TLS_RECORD_TYPE_DATA)
break;
}
recv_end:
if (async) {
int ret, pending;
/* Wait for all previously submitted records to be decrypted */
spin_lock_bh(&ctx->decrypt_compl_lock);
reinit_completion(&ctx->async_wait.completion);
pending = atomic_read(&ctx->decrypt_pending);
spin_unlock_bh(&ctx->decrypt_compl_lock);
ret = 0;
if (pending)
ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
__skb_queue_purge(&ctx->async_hold);
if (ret) {
if (err >= 0 || err == -EINPROGRESS)
err = ret;
decrypted = 0;
goto end;
}
/* Drain records from the rx_list & copy if required */
if (is_peek || is_kvec)
err = process_rx_list(ctx, msg, &control, copied,
decrypted, is_peek);
else
err = process_rx_list(ctx, msg, &control, 0,
async_copy_bytes, is_peek);
decrypted += max(err, 0);
}
copied += decrypted;
end:
tls_rx_reader_unlock(sk, ctx);
if (psock)
sk_psock_put(sk, psock);
return copied ? : err;
}
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct strp_msg *rxm = NULL;
struct sock *sk = sock->sk;
struct tls_msg *tlm;
struct sk_buff *skb;
ssize_t copied = 0;
int chunk;
int err;
err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
if (err < 0)
return err;
if (!skb_queue_empty(&ctx->rx_list)) {
skb = __skb_dequeue(&ctx->rx_list);
} else {
struct tls_decrypt_arg darg;
err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
true);
if (err <= 0)
goto splice_read_end;
memset(&darg.inargs, 0, sizeof(darg.inargs));
err = tls_rx_one_record(sk, NULL, &darg);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto splice_read_end;
}
tls_rx_rec_done(ctx);
skb = darg.skb;
}
rxm = strp_msg(skb);
tlm = tls_msg(skb);
/* splice does not support reading control messages */
if (tlm->control != TLS_RECORD_TYPE_DATA) {
err = -EINVAL;
goto splice_requeue;
}
chunk = min_t(unsigned int, rxm->full_len, len);
copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
if (copied < 0)
goto splice_requeue;
if (chunk < rxm->full_len) {
rxm->offset += len;
rxm->full_len -= len;
goto splice_requeue;
}
consume_skb(skb);
splice_read_end:
tls_rx_reader_unlock(sk, ctx);
return copied ? : err;
splice_requeue:
__skb_queue_head(&ctx->rx_list, skb);
goto splice_read_end;
}
int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t read_actor)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct strp_msg *rxm = NULL;
struct sk_buff *skb = NULL;
struct sk_psock *psock;
size_t flushed_at = 0;
bool released = true;
struct tls_msg *tlm;
ssize_t copied = 0;
ssize_t decrypted;
int err, used;
psock = sk_psock_get(sk);
if (psock) {
sk_psock_put(sk, psock);
return -EINVAL;
}
err = tls_rx_reader_acquire(sk, ctx, true);
if (err < 0)
return err;
/* If crypto failed the connection is broken */
err = ctx->async_wait.err;
if (err)
goto read_sock_end;
decrypted = 0;
do {
if (!skb_queue_empty(&ctx->rx_list)) {
skb = __skb_dequeue(&ctx->rx_list);
rxm = strp_msg(skb);
tlm = tls_msg(skb);
} else {
struct tls_decrypt_arg darg;
err = tls_rx_rec_wait(sk, NULL, true, released);
if (err <= 0)
goto read_sock_end;
memset(&darg.inargs, 0, sizeof(darg.inargs));
err = tls_rx_one_record(sk, NULL, &darg);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto read_sock_end;
}
released = tls_read_flush_backlog(sk, prot, INT_MAX,
0, decrypted,
&flushed_at);
skb = darg.skb;
rxm = strp_msg(skb);
tlm = tls_msg(skb);
decrypted += rxm->full_len;
tls_rx_rec_done(ctx);
}
/* read_sock does not support reading control messages */
if (tlm->control != TLS_RECORD_TYPE_DATA) {
err = -EINVAL;
goto read_sock_requeue;
}
used = read_actor(desc, skb, rxm->offset, rxm->full_len);
if (used <= 0) {
if (!copied)
err = used;
goto read_sock_requeue;
}
copied += used;
if (used < rxm->full_len) {
rxm->offset += used;
rxm->full_len -= used;
if (!desc->count)
goto read_sock_requeue;
} else {
consume_skb(skb);
if (!desc->count)
skb = NULL;
}
} while (skb);
read_sock_end:
tls_rx_reader_release(sk, ctx);
return copied ? : err;
read_sock_requeue:
__skb_queue_head(&ctx->rx_list, skb);
goto read_sock_end;
}
bool tls_sw_sock_is_readable(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
bool ingress_empty = true;
struct sk_psock *psock;
rcu_read_lock();
psock = sk_psock(sk);
if (psock)
ingress_empty = list_empty(&psock->ingress_msg);
rcu_read_unlock();
return !ingress_empty || tls_strp_msg_ready(ctx) ||
!skb_queue_empty(&ctx->rx_list);
}
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
size_t cipher_overhead;
size_t data_len = 0;
int ret;
/* Verify that we have a full TLS header, or wait for more data */
if (strp->stm.offset + prot->prepend_size > skb->len)
return 0;
/* Sanity-check size of on-stack buffer. */
if (WARN_ON(prot->prepend_size > sizeof(header))) {
ret = -EINVAL;
goto read_failure;
}
/* Linearize header to local buffer */
ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
if (ret < 0)
goto read_failure;
strp->mark = header[0];
data_len = ((header[4] & 0xFF) | (header[3] << 8));
cipher_overhead = prot->tag_size;
if (prot->version != TLS_1_3_VERSION &&
prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
cipher_overhead += prot->iv_size;
if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
prot->tail_size) {
ret = -EMSGSIZE;
goto read_failure;
}
if (data_len < cipher_overhead) {
ret = -EBADMSG;
goto read_failure;
}
/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
if (header[1] != TLS_1_2_VERSION_MINOR ||
header[2] != TLS_1_2_VERSION_MAJOR) {
ret = -EINVAL;
goto read_failure;
}
tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
TCP_SKB_CB(skb)->seq + strp->stm.offset);
return data_len + TLS_HEADER_SIZE;
read_failure:
tls_err_abort(strp->sk, ret);
return ret;
}
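/* A minimal standalone sketch (hypothetical helper and type, not part of the
 * original file) of the 5-byte TLS record header that tls_rx_msg_size() above
 * parses: byte 0 is the record content type, bytes 1-2 carry the legacy
 * version (both 0x03 for TLS 1.2-framed records, which TLS 1.3 reuses), and
 * bytes 3-4 hold the payload length in big-endian order -- the same
 * arithmetic as (header[4] & 0xFF) | (header[3] << 8).
 */
struct tls_hdr_sketch {
	unsigned char type;		/* e.g. 0x17 for application_data */
	unsigned char version[2];	/* both bytes 0x03 on the wire */
	unsigned int payload_len;	/* big-endian length from bytes 3-4 */
};
static inline struct tls_hdr_sketch tls_hdr_sketch_parse(const unsigned char h[5])
{
	struct tls_hdr_sketch out;
	out.type = h[0];
	out.version[0] = h[1];
	out.version[1] = h[2];
	out.payload_len = (h[4] & 0xFF) | ((unsigned int)h[3] << 8);
	return out;
}
/* For a 100-byte application_data record the wire header is
 * 17 03 03 00 64, so payload_len comes back as 100.
 */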
void tls_rx_msg_ready(struct tls_strparser *strp)
{
struct tls_sw_context_rx *ctx;
ctx = container_of(strp, struct tls_sw_context_rx, strp);
ctx->saved_data_ready(strp->sk);
}
static void tls_data_ready(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct sk_psock *psock;
gfp_t alloc_save;
trace_sk_data_ready(sk);
alloc_save = sk->sk_allocation;
sk->sk_allocation = GFP_ATOMIC;
tls_strp_data_ready(&ctx->strp);
sk->sk_allocation = alloc_save;
psock = sk_psock_get(sk);
if (psock) {
if (!list_empty(&psock->ingress_msg))
ctx->saved_data_ready(sk);
sk_psock_put(sk, psock);
}
}
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
cancel_delayed_work_sync(&ctx->tx_work.work);
}
void tls_sw_release_resources_tx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec, *tmp;
int pending;
/* Wait for any pending async encryptions to complete */
spin_lock_bh(&ctx->encrypt_compl_lock);
ctx->async_notify = true;
pending = atomic_read(&ctx->encrypt_pending);
spin_unlock_bh(&ctx->encrypt_compl_lock);
if (pending)
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
tls_tx_records(sk, -1);
/* Free up un-sent records in tx_list. First, free
* the partially sent record if any at head of tx_list.
*/
if (tls_ctx->partially_sent_record) {
tls_free_partial_record(sk, tls_ctx);
rec = list_first_entry(&ctx->tx_list,
struct tls_rec, list);
list_del(&rec->list);
sk_msg_free(sk, &rec->msg_plaintext);
kfree(rec);
}
list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
list_del(&rec->list);
sk_msg_free(sk, &rec->msg_encrypted);
sk_msg_free(sk, &rec->msg_plaintext);
kfree(rec);
}
crypto_free_aead(ctx->aead_send);
tls_free_open_rec(sk);
}
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
kfree(ctx);
}
void tls_sw_release_resources_rx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
kfree(tls_ctx->rx.rec_seq);
kfree(tls_ctx->rx.iv);
if (ctx->aead_recv) {
__skb_queue_purge(&ctx->rx_list);
crypto_free_aead(ctx->aead_recv);
tls_strp_stop(&ctx->strp);
/* If tls_sw_strparser_arm() was not called (cleanup paths)
* we still want to tls_strp_stop(), but sk->sk_data_ready was
* never swapped.
*/
if (ctx->saved_data_ready) {
write_lock_bh(&sk->sk_callback_lock);
sk->sk_data_ready = ctx->saved_data_ready;
write_unlock_bh(&sk->sk_callback_lock);
}
}
}
void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
tls_strp_done(&ctx->strp);
}
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
kfree(ctx);
}
void tls_sw_free_resources_rx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
tls_sw_release_resources_rx(sk);
tls_sw_free_ctx_rx(tls_ctx);
}
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct tx_work *tx_work = container_of(delayed_work,
struct tx_work, work);
struct sock *sk = tx_work->sk;
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx;
if (unlikely(!tls_ctx))
return;
ctx = tls_sw_ctx_tx(tls_ctx);
if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
return;
if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
return;
if (mutex_trylock(&tls_ctx->tx_lock)) {
lock_sock(sk);
tls_tx_records(sk, -1);
release_sock(sk);
mutex_unlock(&tls_ctx->tx_lock);
} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
/* Someone is holding the tx_lock, they will likely run Tx
* and cancel the work on their way out of the lock section.
* Schedule a long delay just in case.
*/
schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
}
}
static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
struct tls_rec *rec;
rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
if (!rec)
return false;
return READ_ONCE(rec->tx_ready);
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
/* Schedule the transmission if tx list is ready */
if (tls_is_tx_ready(tx_ctx) &&
!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
write_lock_bh(&sk->sk_callback_lock);
rx_ctx->saved_data_ready = sk->sk_data_ready;
sk->sk_data_ready = tls_data_ready;
write_unlock_bh(&sk->sk_callback_lock);
}
void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
tls_ctx->prot_info.version != TLS_1_3_VERSION;
}
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_crypto_info *crypto_info;
struct tls_sw_context_tx *sw_ctx_tx = NULL;
struct tls_sw_context_rx *sw_ctx_rx = NULL;
struct cipher_context *cctx;
struct crypto_aead **aead;
struct crypto_tfm *tfm;
char *iv, *rec_seq, *key, *salt;
const struct tls_cipher_desc *cipher_desc;
u16 nonce_size;
int rc = 0;
if (!ctx) {
rc = -EINVAL;
goto out;
}
if (tx) {
if (!ctx->priv_ctx_tx) {
sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
if (!sw_ctx_tx) {
rc = -ENOMEM;
goto out;
}
ctx->priv_ctx_tx = sw_ctx_tx;
} else {
sw_ctx_tx =
(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
}
} else {
if (!ctx->priv_ctx_rx) {
sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
if (!sw_ctx_rx) {
rc = -ENOMEM;
goto out;
}
ctx->priv_ctx_rx = sw_ctx_rx;
} else {
sw_ctx_rx =
(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
}
}
if (tx) {
crypto_init_wait(&sw_ctx_tx->async_wait);
spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
crypto_info = &ctx->crypto_send.info;
cctx = &ctx->tx;
aead = &sw_ctx_tx->aead_send;
INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
sw_ctx_tx->tx_work.sk = sk;
} else {
crypto_init_wait(&sw_ctx_rx->async_wait);
spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
init_waitqueue_head(&sw_ctx_rx->wq);
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
skb_queue_head_init(&sw_ctx_rx->rx_list);
skb_queue_head_init(&sw_ctx_rx->async_hold);
aead = &sw_ctx_rx->aead_recv;
}
cipher_desc = get_cipher_desc(crypto_info->cipher_type);
if (!cipher_desc) {
rc = -EINVAL;
goto free_priv;
}
nonce_size = cipher_desc->nonce;
iv = crypto_info_iv(crypto_info, cipher_desc);
key = crypto_info_key(crypto_info, cipher_desc);
salt = crypto_info_salt(crypto_info, cipher_desc);
rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
if (crypto_info->version == TLS_1_3_VERSION) {
nonce_size = 0;
prot->aad_size = TLS_HEADER_SIZE;
prot->tail_size = 1;
} else {
prot->aad_size = TLS_AAD_SPACE_SIZE;
prot->tail_size = 0;
}
/* Sanity-check the sizes for stack allocations. */
if (nonce_size > MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE) {
rc = -EINVAL;
goto free_priv;
}
prot->version = crypto_info->version;
prot->cipher_type = crypto_info->cipher_type;
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
prot->tag_size = cipher_desc->tag;
prot->overhead_size = prot->prepend_size +
prot->tag_size + prot->tail_size;
prot->iv_size = cipher_desc->iv;
prot->salt_size = cipher_desc->salt;
cctx->iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
if (!cctx->iv) {
rc = -ENOMEM;
goto free_priv;
}
/* Note: 128 & 256 bit salt are the same size */
prot->rec_seq_size = cipher_desc->rec_seq;
memcpy(cctx->iv, salt, cipher_desc->salt);
memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
cctx->rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
if (!cctx->rec_seq) {
rc = -ENOMEM;
goto free_iv;
}
if (!*aead) {
*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
if (IS_ERR(*aead)) {
rc = PTR_ERR(*aead);
*aead = NULL;
goto free_rec_seq;
}
}
ctx->push_pending_record = tls_sw_push_pending_record;
rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
if (rc)
goto free_aead;
rc = crypto_aead_setauthsize(*aead, prot->tag_size);
if (rc)
goto free_aead;
if (sw_ctx_rx) {
tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
tls_update_rx_zc_capable(ctx);
sw_ctx_rx->async_capable =
crypto_info->version != TLS_1_3_VERSION &&
!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
rc = tls_strp_init(&sw_ctx_rx->strp, sk);
if (rc)
goto free_aead;
}
goto out;
free_aead:
crypto_free_aead(*aead);
*aead = NULL;
free_rec_seq:
kfree(cctx->rec_seq);
cctx->rec_seq = NULL;
free_iv:
kfree(cctx->iv);
cctx->iv = NULL;
free_priv:
if (tx) {
kfree(ctx->priv_ctx_tx);
ctx->priv_ctx_tx = NULL;
} else {
kfree(ctx->priv_ctx_rx);
ctx->priv_ctx_rx = NULL;
}
out:
return rc;
}
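/* A standalone sketch (hypothetical helper, not part of the original file) of
 * the per-record overhead arithmetic performed by tls_set_sw_offload() above:
 * prepend_size is the TLS header plus the explicit nonce, and overhead_size
 * adds the authentication tag and the TLS 1.3 tail byte. Assuming the usual
 * constants (5-byte TLS header, 8-byte explicit nonce and 16-byte tag for
 * AES-GCM-128), TLS 1.2 gives 13 + 16 + 0 = 29 bytes of overhead per record,
 * while TLS 1.3 (nonce_size forced to 0, 1-byte tail) gives 5 + 16 + 1 = 22.
 */
static inline unsigned int tls_overhead_sketch(unsigned int hdr_size,
					       unsigned int nonce_size,
					       unsigned int tag_size,
					       unsigned int tail_size)
{
	unsigned int prepend_size = hdr_size + nonce_size;	/* mirrors prot->prepend_size */
	return prepend_size + tag_size + tail_size;		/* mirrors prot->overhead_size */
}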
| linux-master | net/tls/tls_sw.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
#include <linux/module.h>
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif
| linux-master | net/tls/trace.c |
/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>
#include "tls.h"
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
struct scatterlist *src = walk->sg;
int diff = walk->offset - src->offset;
sg_set_page(sg, sg_page(src),
src->length - diff, walk->offset);
scatterwalk_crypto_chain(sg, sg_next(src), 2);
}
static int tls_enc_record(struct aead_request *aead_req,
struct crypto_aead *aead, char *aad,
char *iv, __be64 rcd_sn,
struct scatter_walk *in,
struct scatter_walk *out, int *in_len,
struct tls_prot_info *prot)
{
unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
const struct tls_cipher_desc *cipher_desc;
struct scatterlist sg_in[3];
struct scatterlist sg_out[3];
unsigned int buf_size;
u16 len;
int rc;
switch (prot->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
case TLS_CIPHER_AES_GCM_256:
break;
default:
return -EINVAL;
}
cipher_desc = get_cipher_desc(prot->cipher_type);
buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
len = min_t(int, *in_len, buf_size);
scatterwalk_copychunks(buf, in, len, 0);
scatterwalk_copychunks(buf, out, len, 1);
*in_len -= len;
if (!*in_len)
return 0;
scatterwalk_pagedone(in, 0, 1);
scatterwalk_pagedone(out, 1, 1);
len = buf[4] | (buf[3] << 8);
len -= cipher_desc->iv;
tls_make_aad(aad, len - cipher_desc->tag, (char *)&rcd_sn, buf[0], prot);
memcpy(iv + cipher_desc->salt, buf + TLS_HEADER_SIZE, cipher_desc->iv);
sg_init_table(sg_in, ARRAY_SIZE(sg_in));
sg_init_table(sg_out, ARRAY_SIZE(sg_out));
sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
chain_to_walk(sg_in + 1, in);
chain_to_walk(sg_out + 1, out);
*in_len -= len;
if (*in_len < 0) {
*in_len += cipher_desc->tag;
/* The input buffer doesn't contain the entire record;
 * trim len accordingly. The resulting authentication tag
 * will contain garbage, but we don't care, so we won't
 * include any of it in the output skb.
 * Note that we assume the output buffer length
 * is larger than the input buffer length + tag size.
 */
if (*in_len < 0)
len += *in_len;
*in_len = 0;
}
if (*in_len) {
scatterwalk_copychunks(NULL, in, len, 2);
scatterwalk_pagedone(in, 0, 1);
scatterwalk_copychunks(NULL, out, len, 2);
scatterwalk_pagedone(out, 1, 1);
}
len -= cipher_desc->tag;
aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
rc = crypto_aead_encrypt(aead_req);
return rc;
}
static void tls_init_aead_request(struct aead_request *aead_req,
struct crypto_aead *aead)
{
aead_request_set_tfm(aead_req, aead);
aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}
static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
gfp_t flags)
{
unsigned int req_size = sizeof(struct aead_request) +
crypto_aead_reqsize(aead);
struct aead_request *aead_req;
aead_req = kzalloc(req_size, flags);
if (aead_req)
tls_init_aead_request(aead_req, aead);
return aead_req;
}
static int tls_enc_records(struct aead_request *aead_req,
struct crypto_aead *aead, struct scatterlist *sg_in,
struct scatterlist *sg_out, char *aad, char *iv,
u64 rcd_sn, int len, struct tls_prot_info *prot)
{
struct scatter_walk out, in;
int rc;
scatterwalk_start(&in, sg_in);
scatterwalk_start(&out, sg_out);
do {
rc = tls_enc_record(aead_req, aead, aad, iv,
cpu_to_be64(rcd_sn), &in, &out, &len, prot);
rcd_sn++;
} while (rc == 0 && len);
scatterwalk_done(&in, 0, 0);
scatterwalk_done(&out, 1, 0);
return rc;
}
/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
* might have been changed by NAT.
*/
static void update_chksum(struct sk_buff *skb, int headln)
{
struct tcphdr *th = tcp_hdr(skb);
int datalen = skb->len - headln;
const struct ipv6hdr *ipv6h;
const struct iphdr *iph;
/* We only changed the payload, so if the skb already uses
 * CHECKSUM_PARTIAL we don't need to update anything.
 */
if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
return;
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
if (skb->sk->sk_family == AF_INET6) {
ipv6h = ipv6_hdr(skb);
th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
datalen, IPPROTO_TCP, 0);
} else {
iph = ip_hdr(skb);
th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
IPPROTO_TCP, 0);
}
}
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
struct sock *sk = skb->sk;
int delta;
skb_copy_header(nskb, skb);
skb_put(nskb, skb->len);
memcpy(nskb->data, skb->data, headln);
nskb->destructor = skb->destructor;
nskb->sk = sk;
skb->destructor = NULL;
skb->sk = NULL;
update_chksum(nskb, headln);
/* sock_efree means the skb must have gone through skb_orphan_partial() */
if (nskb->destructor == sock_efree)
return;
delta = nskb->truesize - skb->truesize;
if (likely(delta < 0))
WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
else if (delta)
refcount_add(delta, &sk->sk_wmem_alloc);
}
/* This function may be called after the user socket is already
* closed so make sure we don't use anything freed during
* tls_sk_proto_close here
*/
static int fill_sg_in(struct scatterlist *sg_in,
struct sk_buff *skb,
struct tls_offload_context_tx *ctx,
u64 *rcd_sn,
s32 *sync_size,
int *resync_sgs)
{
int tcp_payload_offset = skb_tcp_all_headers(skb);
int payload_len = skb->len - tcp_payload_offset;
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
struct tls_record_info *record;
unsigned long flags;
int remaining;
int i;
spin_lock_irqsave(&ctx->lock, flags);
record = tls_get_record(ctx, tcp_seq, rcd_sn);
if (!record) {
spin_unlock_irqrestore(&ctx->lock, flags);
return -EINVAL;
}
*sync_size = tcp_seq - tls_record_start_seq(record);
if (*sync_size < 0) {
int is_start_marker = tls_record_is_start_marker(record);
spin_unlock_irqrestore(&ctx->lock, flags);
/* This should only occur if the relevant record was
* already acked. In that case it should be ok
* to drop the packet and avoid retransmission.
*
* There is a corner case where the packet contains
* both an acked and a non-acked record.
* We currently don't handle that case and rely
* on TCP to retransmit a packet that doesn't contain
* already acked payload.
*/
if (!is_start_marker)
*sync_size = 0;
return -EINVAL;
}
remaining = *sync_size;
for (i = 0; remaining > 0; i++) {
skb_frag_t *frag = &record->frags[i];
__skb_frag_ref(frag);
sg_set_page(sg_in + i, skb_frag_page(frag),
skb_frag_size(frag), skb_frag_off(frag));
remaining -= skb_frag_size(frag);
if (remaining < 0)
sg_in[i].length += remaining;
}
*resync_sgs = i;
spin_unlock_irqrestore(&ctx->lock, flags);
if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
return -EINVAL;
return 0;
}
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
struct tls_context *tls_ctx,
struct sk_buff *nskb,
int tcp_payload_offset,
int payload_len,
int sync_size,
void *dummy_buf)
{
const struct tls_cipher_desc *cipher_desc =
get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
sg_set_buf(&sg_out[0], dummy_buf, sync_size);
sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
/* Add room for authentication tag produced by crypto */
dummy_buf += sync_size;
sg_set_buf(&sg_out[2], dummy_buf, cipher_desc->tag);
}
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
struct scatterlist sg_out[3],
struct scatterlist *sg_in,
struct sk_buff *skb,
s32 sync_size, u64 rcd_sn)
{
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int tcp_payload_offset = skb_tcp_all_headers(skb);
int payload_len = skb->len - tcp_payload_offset;
const struct tls_cipher_desc *cipher_desc;
void *buf, *iv, *aad, *dummy_buf, *salt;
struct aead_request *aead_req;
struct sk_buff *nskb = NULL;
int buf_len;
aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
if (!aead_req)
return NULL;
switch (tls_ctx->crypto_send.info.cipher_type) {
case TLS_CIPHER_AES_GCM_128:
salt = tls_ctx->crypto_send.aes_gcm_128.salt;
break;
case TLS_CIPHER_AES_GCM_256:
salt = tls_ctx->crypto_send.aes_gcm_256.salt;
break;
default:
goto free_req;
}
cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
sync_size + cipher_desc->tag;
buf = kmalloc(buf_len, GFP_ATOMIC);
if (!buf)
goto free_req;
iv = buf;
memcpy(iv, salt, cipher_desc->salt);
aad = buf + cipher_desc->salt + cipher_desc->iv;
dummy_buf = aad + TLS_AAD_SPACE_SIZE;
nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
if (!nskb)
goto free_buf;
skb_reserve(nskb, skb_headroom(skb));
fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
payload_len, sync_size, dummy_buf);
if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
rcd_sn, sync_size + payload_len,
&tls_ctx->prot_info) < 0)
goto free_nskb;
complete_skb(nskb, skb, tcp_payload_offset);
/* validate_xmit_skb_list assumes that if the skb wasn't segmented
* nskb->prev will point to the skb itself
*/
nskb->prev = nskb;
free_buf:
kfree(buf);
free_req:
kfree(aead_req);
return nskb;
free_nskb:
kfree_skb(nskb);
nskb = NULL;
goto free_buf;
}
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
int tcp_payload_offset = skb_tcp_all_headers(skb);
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int payload_len = skb->len - tcp_payload_offset;
struct scatterlist *sg_in, sg_out[3];
struct sk_buff *nskb = NULL;
int sg_in_max_elements;
int resync_sgs = 0;
s32 sync_size = 0;
u64 rcd_sn;
/* worst case is:
* MAX_SKB_FRAGS in tls_record_info
* MAX_SKB_FRAGS + 1 in SKB head and frags.
*/
sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;
if (!payload_len)
return skb;
sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
if (!sg_in)
goto free_orig;
sg_init_table(sg_in, sg_in_max_elements);
sg_init_table(sg_out, ARRAY_SIZE(sg_out));
if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
/* bypass packets before kernel TLS socket option was set */
if (sync_size < 0 && payload_len <= -sync_size)
nskb = skb_get(skb);
goto put_sg;
}
nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);
put_sg:
while (resync_sgs)
put_page(sg_page(&sg_in[--resync_sgs]));
kfree(sg_in);
free_orig:
if (nskb)
consume_skb(skb);
else
kfree_skb(skb);
return nskb;
}
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
struct sk_buff *skb)
{
if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
netif_is_bond_master(dev))
return skb;
return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
struct net_device *dev,
struct sk_buff *skb)
{
return tls_sw_fallback(sk, skb);
}
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
return tls_sw_fallback(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(tls_encrypt_skb);
int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info)
{
const struct tls_cipher_desc *cipher_desc;
int rc;
cipher_desc = get_cipher_desc(crypto_info->cipher_type);
if (!cipher_desc || !cipher_desc->offloadable)
return -EINVAL;
offload_ctx->aead_send =
crypto_alloc_aead(cipher_desc->cipher_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(offload_ctx->aead_send)) {
rc = PTR_ERR(offload_ctx->aead_send);
pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
offload_ctx->aead_send = NULL;
goto err_out;
}
rc = crypto_aead_setkey(offload_ctx->aead_send,
crypto_info_key(crypto_info, cipher_desc),
cipher_desc->key);
if (rc)
goto free_aead;
rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_desc->tag);
if (rc)
goto free_aead;
return 0;
free_aead:
crypto_free_aead(offload_ctx->aead_send);
err_out:
return rc;
}
| linux-master | net/tls/tls_device_fallback.c |
/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include "tls.h"
#include "trace.h"
/* device_offload_lock is used to synchronize tls_dev_add
* against NETDEV_DOWN notifications.
*/
static DECLARE_RWSEM(device_offload_lock);
static struct workqueue_struct *destruct_wq __read_mostly;
static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);
static struct page *dummy_page;
static void tls_device_free_ctx(struct tls_context *ctx)
{
if (ctx->tx_conf == TLS_HW) {
kfree(tls_offload_ctx_tx(ctx));
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
}
if (ctx->rx_conf == TLS_HW)
kfree(tls_offload_ctx_rx(ctx));
tls_ctx_free(NULL, ctx);
}
static void tls_device_tx_del_task(struct work_struct *work)
{
struct tls_offload_context_tx *offload_ctx =
container_of(work, struct tls_offload_context_tx, destruct_work);
struct tls_context *ctx = offload_ctx->ctx;
struct net_device *netdev;
/* Safe, because this is the destroy flow, refcount is 0, so
* tls_device_down can't store this field in parallel.
*/
netdev = rcu_dereference_protected(ctx->netdev,
!refcount_read(&ctx->refcount));
netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
dev_put(netdev);
ctx->netdev = NULL;
tls_device_free_ctx(ctx);
}
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
struct net_device *netdev;
unsigned long flags;
bool async_cleanup;
spin_lock_irqsave(&tls_device_lock, flags);
if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
spin_unlock_irqrestore(&tls_device_lock, flags);
return;
}
list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
/* Safe, because this is the destroy flow, refcount is 0, so
* tls_device_down can't store this field in parallel.
*/
netdev = rcu_dereference_protected(ctx->netdev,
!refcount_read(&ctx->refcount));
async_cleanup = netdev && ctx->tx_conf == TLS_HW;
if (async_cleanup) {
struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
/* queue_work inside the spinlock
* to make sure tls_device_down waits for that work.
*/
queue_work(destruct_wq, &offload_ctx->destruct_work);
}
spin_unlock_irqrestore(&tls_device_lock, flags);
if (!async_cleanup)
tls_device_free_ctx(ctx);
}
/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
struct dst_entry *dst = sk_dst_get(sk);
struct net_device *netdev = NULL;
if (likely(dst)) {
netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
dev_hold(netdev);
}
dst_release(dst);
return netdev;
}
static void destroy_record(struct tls_record_info *record)
{
int i;
for (i = 0; i < record->num_frags; i++)
__skb_frag_unref(&record->frags[i], false);
kfree(record);
}
static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
struct tls_record_info *info, *temp;
list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
list_del(&info->list);
destroy_record(info);
}
offload_ctx->retransmit_hint = NULL;
}
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_record_info *info, *temp;
struct tls_offload_context_tx *ctx;
u64 deleted_records = 0;
unsigned long flags;
if (!tls_ctx)
return;
ctx = tls_offload_ctx_tx(tls_ctx);
spin_lock_irqsave(&ctx->lock, flags);
info = ctx->retransmit_hint;
if (info && !before(acked_seq, info->end_seq))
ctx->retransmit_hint = NULL;
list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
if (before(acked_seq, info->end_seq))
break;
list_del(&info->list);
destroy_record(info);
deleted_records++;
}
ctx->unacked_record_sn += deleted_records;
spin_unlock_irqrestore(&ctx->lock, flags);
}
/* At this point, there should be no references on this
* socket and no in-flight SKBs associated with this
* socket, so it is safe to free all the resources.
*/
void tls_device_sk_destruct(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
tls_ctx->sk_destruct(sk);
if (tls_ctx->tx_conf == TLS_HW) {
if (ctx->open_record)
destroy_record(ctx->open_record);
delete_all_records(ctx);
crypto_free_aead(ctx->aead_send);
clean_acked_data_disable(inet_csk(sk));
}
tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
void tls_device_free_resources_tx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
tls_free_partial_record(sk, tls_ctx);
}
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
u32 seq)
{
struct net_device *netdev;
struct sk_buff *skb;
int err = 0;
u8 *rcd_sn;
skb = tcp_write_queue_tail(sk);
if (skb)
TCP_SKB_CB(skb)->eor = 1;
rcd_sn = tls_ctx->tx.rec_seq;
trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
down_read(&device_offload_lock);
netdev = rcu_dereference_protected(tls_ctx->netdev,
lockdep_is_held(&device_offload_lock));
if (netdev)
err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
rcd_sn,
TLS_OFFLOAD_CTX_DIR_TX);
up_read(&device_offload_lock);
if (err)
return;
clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}
static void tls_append_frag(struct tls_record_info *record,
struct page_frag *pfrag,
int size)
{
skb_frag_t *frag;
frag = &record->frags[record->num_frags - 1];
if (skb_frag_page(frag) == pfrag->page &&
skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
skb_frag_size_add(frag, size);
} else {
++frag;
skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
size);
++record->num_frags;
get_page(pfrag->page);
}
pfrag->offset += size;
record->len += size;
}
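/* A minimal standalone sketch (hypothetical types, not part of the original
 * file) of the coalescing rule tls_append_frag() above applies: when the new
 * chunk starts exactly where the last fragment ends on the same page, the
 * fragment is simply grown; otherwise a new fragment is opened, which in the
 * real code also costs an extra page reference.
 */
struct frag_sketch {
	const void *page;
	unsigned int off;
	unsigned int len;
};
static inline int frag_can_coalesce(const struct frag_sketch *last,
				    const void *page, unsigned int off)
{
	return last->page == page && last->off + last->len == off;
}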
static int tls_push_record(struct sock *sk,
struct tls_context *ctx,
struct tls_offload_context_tx *offload_ctx,
struct tls_record_info *record,
int flags)
{
struct tls_prot_info *prot = &ctx->prot_info;
struct tcp_sock *tp = tcp_sk(sk);
skb_frag_t *frag;
int i;
record->end_seq = tp->write_seq + record->len;
list_add_tail_rcu(&record->list, &offload_ctx->records_list);
offload_ctx->open_record = NULL;
if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
tls_device_resync_tx(sk, ctx, tp->write_seq);
tls_advance_record_sn(sk, prot, &ctx->tx);
for (i = 0; i < record->num_frags; i++) {
frag = &record->frags[i];
sg_unmark_end(&offload_ctx->sg_tx_data[i]);
sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
skb_frag_size(frag), skb_frag_off(frag));
sk_mem_charge(sk, skb_frag_size(frag));
get_page(skb_frag_page(frag));
}
sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
/* all ready, send */
return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}
static void tls_device_record_close(struct sock *sk,
struct tls_context *ctx,
struct tls_record_info *record,
struct page_frag *pfrag,
unsigned char record_type)
{
struct tls_prot_info *prot = &ctx->prot_info;
struct page_frag dummy_tag_frag;
/* append tag
* device will fill in the tag, we just need to append a placeholder
* use socket memory to improve coalescing (re-using a single buffer
* increases frag count)
* if we can't allocate memory now use the dummy page
*/
if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
!skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
dummy_tag_frag.page = dummy_page;
dummy_tag_frag.offset = 0;
pfrag = &dummy_tag_frag;
}
tls_append_frag(record, pfrag, prot->tag_size);
/* fill prepend */
tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
record->len - prot->overhead_size,
record_type);
}
static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
struct page_frag *pfrag,
size_t prepend_size)
{
struct tls_record_info *record;
skb_frag_t *frag;
record = kmalloc(sizeof(*record), GFP_KERNEL);
if (!record)
return -ENOMEM;
frag = &record->frags[0];
skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
prepend_size);
get_page(pfrag->page);
pfrag->offset += prepend_size;
record->num_frags = 1;
record->len = prepend_size;
offload_ctx->open_record = record;
return 0;
}
static int tls_do_allocation(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct page_frag *pfrag,
size_t prepend_size)
{
int ret;
if (!offload_ctx->open_record) {
if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
sk->sk_allocation))) {
READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
return -ENOMEM;
}
ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
if (ret)
return ret;
if (pfrag->size > pfrag->offset)
return 0;
}
if (!sk_page_frag_refill(sk, pfrag))
return -ENOMEM;
return 0;
}
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
size_t pre_copy, nocache;
pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
if (pre_copy) {
pre_copy = min(pre_copy, bytes);
if (copy_from_iter(addr, pre_copy, i) != pre_copy)
return -EFAULT;
bytes -= pre_copy;
addr += pre_copy;
}
nocache = round_down(bytes, SMP_CACHE_BYTES);
if (copy_from_iter_nocache(addr, nocache, i) != nocache)
return -EFAULT;
bytes -= nocache;
addr += nocache;
if (bytes && copy_from_iter(addr, bytes, i) != bytes)
return -EFAULT;
return 0;
}
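/* A standalone sketch (hypothetical helper, not part of the original file) of
 * the alignment arithmetic used by tls_device_copy_data() above. For a
 * power-of-two line size C, ~(addr - 1) & (C - 1) equals (C - addr % C) % C,
 * i.e. the number of bytes needed to reach the next cache-line boundary
 * (0 when addr is already aligned). With 64-byte lines and an address ending
 * in 0x30, for example, the result is 16.
 */
static inline unsigned long bytes_to_next_line_sketch(unsigned long addr,
						      unsigned long line_size)
{
	return ~(addr - 1) & (line_size - 1);
}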
static int tls_push_data(struct sock *sk,
struct iov_iter *iter,
size_t size, int flags,
unsigned char record_type)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
struct tls_record_info *record;
int tls_push_record_flags;
struct page_frag *pfrag;
size_t orig_size = size;
u32 max_open_record_len;
bool more = false;
bool done = false;
int copy, rc = 0;
long timeo;
if (flags &
~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_SPLICE_PAGES | MSG_EOR))
return -EOPNOTSUPP;
if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR))
return -EINVAL;
if (unlikely(sk->sk_err))
return -sk->sk_err;
flags |= MSG_SENDPAGE_DECRYPTED;
tls_push_record_flags = flags | MSG_MORE;
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
if (tls_is_partially_sent_record(tls_ctx)) {
rc = tls_push_partial_record(sk, tls_ctx, flags);
if (rc < 0)
return rc;
}
pfrag = sk_page_frag(sk);
/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
* we need to leave room for an authentication tag.
*/
max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
prot->prepend_size;
do {
rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
if (unlikely(rc)) {
rc = sk_stream_wait_memory(sk, &timeo);
if (!rc)
continue;
record = ctx->open_record;
if (!record)
break;
handle_error:
if (record_type != TLS_RECORD_TYPE_DATA) {
/* avoid sending partial
* record with type !=
* application_data
*/
size = orig_size;
destroy_record(record);
ctx->open_record = NULL;
} else if (record->len > prot->prepend_size) {
goto last_record;
}
break;
}
record = ctx->open_record;
copy = min_t(size_t, size, max_open_record_len - record->len);
if (copy && (flags & MSG_SPLICE_PAGES)) {
struct page_frag zc_pfrag;
struct page **pages = &zc_pfrag.page;
size_t off;
rc = iov_iter_extract_pages(iter, &pages,
copy, 1, 0, &off);
if (rc <= 0) {
if (rc == 0)
rc = -EIO;
goto handle_error;
}
copy = rc;
if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
iov_iter_revert(iter, copy);
rc = -EIO;
goto handle_error;
}
zc_pfrag.offset = off;
zc_pfrag.size = copy;
tls_append_frag(record, &zc_pfrag, copy);
} else if (copy) {
copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
rc = tls_device_copy_data(page_address(pfrag->page) +
pfrag->offset, copy,
iter);
if (rc)
goto handle_error;
tls_append_frag(record, pfrag, copy);
}
size -= copy;
if (!size) {
last_record:
tls_push_record_flags = flags;
if (flags & MSG_MORE) {
more = true;
break;
}
done = true;
}
if (done || record->len >= max_open_record_len ||
(record->num_frags >= MAX_SKB_FRAGS - 1)) {
tls_device_record_close(sk, tls_ctx, record,
pfrag, record_type);
rc = tls_push_record(sk,
tls_ctx,
ctx,
record,
tls_push_record_flags);
if (rc < 0)
break;
}
} while (!done);
tls_ctx->pending_open_record_frags = more;
if (orig_size - size > 0)
rc = orig_size - size;
return rc;
}
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
unsigned char record_type = TLS_RECORD_TYPE_DATA;
struct tls_context *tls_ctx = tls_get_ctx(sk);
int rc;
if (!tls_ctx->zerocopy_sendfile)
msg->msg_flags &= ~MSG_SPLICE_PAGES;
mutex_lock(&tls_ctx->tx_lock);
lock_sock(sk);
if (unlikely(msg->msg_controllen)) {
rc = tls_process_cmsg(sk, msg, &record_type);
if (rc)
goto out;
}
rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
record_type);
out:
release_sock(sk);
mutex_unlock(&tls_ctx->tx_lock);
return rc;
}
void tls_device_splice_eof(struct socket *sock)
{
struct sock *sk = sock->sk;
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct iov_iter iter = {};
if (!tls_is_partially_sent_record(tls_ctx))
return;
mutex_lock(&tls_ctx->tx_lock);
lock_sock(sk);
if (tls_is_partially_sent_record(tls_ctx)) {
iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
}
release_sock(sk);
mutex_unlock(&tls_ctx->tx_lock);
}
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn)
{
u64 record_sn = context->hint_record_sn;
struct tls_record_info *info, *last;
info = context->retransmit_hint;
if (!info ||
before(seq, info->end_seq - info->len)) {
/* if retransmit_hint is irrelevant start
* from the beginning of the list
*/
info = list_first_entry_or_null(&context->records_list,
struct tls_record_info, list);
if (!info)
return NULL;
/* send the start_marker record if seq number is before the
* tls offload start marker sequence number. This record is
* required to handle TCP packets which are before TLS offload
* started.
* And if it's not start marker, look if this seq number
* belongs to the list.
*/
if (likely(!tls_record_is_start_marker(info))) {
/* we have the first record, get the last record to see
* if this seq number belongs to the list.
*/
last = list_last_entry(&context->records_list,
struct tls_record_info, list);
if (!between(seq, tls_record_start_seq(info),
last->end_seq))
return NULL;
}
record_sn = context->unacked_record_sn;
}
/* We just need the _rcu for the READ_ONCE() */
rcu_read_lock();
list_for_each_entry_from_rcu(info, &context->records_list, list) {
if (before(seq, info->end_seq)) {
if (!context->retransmit_hint ||
after(info->end_seq,
context->retransmit_hint->end_seq)) {
context->hint_record_sn = record_sn;
context->retransmit_hint = info;
}
*p_record_sn = record_sn;
goto exit_rcu_unlock;
}
record_sn++;
}
info = NULL;
exit_rcu_unlock:
rcu_read_unlock();
return info;
}
EXPORT_SYMBOL(tls_get_record);
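/* A simplified, standalone sketch (hypothetical types, not part of the
 * original file) of the lookup rule tls_get_record() implements above: walk
 * the records in order and return the first one whose end sequence is still
 * after the requested TCP sequence; that record covers
 * [end_seq - len, end_seq). The real code additionally caches a retransmit
 * hint and starts from it when possible.
 */
struct rec_sketch {
	unsigned int end_seq;
	unsigned int len;
};
static inline const struct rec_sketch *
rec_sketch_find(const struct rec_sketch *recs, int n, unsigned int seq)
{
	int i;
	for (i = 0; i < n; i++)
		if ((int)(seq - recs[i].end_seq) < 0)	/* wraparound-safe before() */
			return &recs[i];
	return 0;	/* seq not covered by any queued record */
}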
static int tls_device_push_pending_record(struct sock *sk, int flags)
{
struct iov_iter iter;
iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
}
void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
if (tls_is_partially_sent_record(ctx)) {
gfp_t sk_allocation = sk->sk_allocation;
WARN_ON_ONCE(sk->sk_write_pending);
sk->sk_allocation = GFP_ATOMIC;
tls_push_partial_record(sk, ctx,
MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_SENDPAGE_DECRYPTED);
sk->sk_allocation = sk_allocation;
}
}
static void tls_device_resync_rx(struct tls_context *tls_ctx,
struct sock *sk, u32 seq, u8 *rcd_sn)
{
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
struct net_device *netdev;
trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
rcu_read_lock();
netdev = rcu_dereference(tls_ctx->netdev);
if (netdev)
netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
TLS_OFFLOAD_CTX_DIR_RX);
rcu_read_unlock();
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
s64 resync_req, u32 *seq, u16 *rcd_delta)
{
u32 is_async = resync_req & RESYNC_REQ_ASYNC;
u32 req_seq = resync_req >> 32;
u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
u16 i;
*rcd_delta = 0;
if (is_async) {
/* shouldn't get to wraparound:
* too long in async stage, something bad happened
*/
if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
return false;
/* asynchronous stage: log all headers seq such that
* req_seq <= seq <= end_seq, and wait for real resync request
*/
if (before(*seq, req_seq))
return false;
if (!after(*seq, req_end) &&
resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
resync_async->log[resync_async->loglen++] = *seq;
resync_async->rcd_delta++;
return false;
}
/* synchronous stage: check against the logged entries and
* proceed to check the next entries if no match was found
*/
for (i = 0; i < resync_async->loglen; i++)
if (req_seq == resync_async->log[i] &&
atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
*rcd_delta = resync_async->rcd_delta - i;
*seq = req_seq;
resync_async->loglen = 0;
resync_async->rcd_delta = 0;
return true;
}
resync_async->loglen = 0;
resync_async->rcd_delta = 0;
if (req_seq == *seq &&
atomic64_try_cmpxchg(&resync_async->req,
&resync_req, 0))
return true;
return false;
}
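/* A standalone sketch (hypothetical helper and flag value, not part of the
 * original file) of how tls_device_rx_resync_async() above unpacks the 64-bit
 * resync request: the requested TCP sequence sits in the top 32 bits, a
 * 16-bit span in bits 31..16 gives the end of the logging window, and a
 * low-order flag bit (RESYNC_REQ_ASYNC in the real code) marks the
 * asynchronous stage.
 */
struct resync_req_sketch {
	unsigned int seq;	/* resync_req >> 32 */
	unsigned int end;	/* seq + ((resync_req >> 16) & 0xffff) */
	int is_async;		/* low-order flag bit */
};
static inline struct resync_req_sketch
resync_req_sketch_decode(unsigned long long req, unsigned long long async_flag)
{
	struct resync_req_sketch out;
	out.seq = (unsigned int)(req >> 32);
	out.end = out.seq + (unsigned int)((req >> 16) & 0xffff);
	out.is_async = (req & async_flag) != 0;
	return out;
}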
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
u32 sock_data, is_req_pending;
struct tls_prot_info *prot;
s64 resync_req;
u16 rcd_delta;
u32 req_seq;
if (tls_ctx->rx_conf != TLS_HW)
return;
if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
return;
prot = &tls_ctx->prot_info;
rx_ctx = tls_offload_ctx_rx(tls_ctx);
memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
switch (rx_ctx->resync_type) {
case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
resync_req = atomic64_read(&rx_ctx->resync_req);
req_seq = resync_req >> 32;
seq += TLS_HEADER_SIZE - 1;
is_req_pending = resync_req;
if (likely(!is_req_pending) || req_seq != seq ||
!atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
return;
break;
case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
if (likely(!rx_ctx->resync_nh_do_now))
return;
/* head of next rec is already in, note that the sock_inq will
* include the currently parsed message when called from parser
*/
sock_data = tcp_inq(sk);
if (sock_data > rcd_len) {
trace_tls_device_rx_resync_nh_delay(sk, sock_data,
rcd_len);
return;
}
rx_ctx->resync_nh_do_now = 0;
seq += rcd_len;
tls_bigint_increment(rcd_sn, prot->rec_seq_size);
break;
case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
resync_req = atomic64_read(&rx_ctx->resync_async->req);
is_req_pending = resync_req;
if (likely(!is_req_pending))
return;
if (!tls_device_rx_resync_async(rx_ctx->resync_async,
resync_req, &seq, &rcd_delta))
return;
tls_bigint_subtract(rcd_sn, rcd_delta);
break;
}
tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}
static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
struct tls_offload_context_rx *ctx,
struct sock *sk, struct sk_buff *skb)
{
struct strp_msg *rxm;
/* device will request resyncs by itself based on stream scan */
if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
return;
/* already scheduled */
if (ctx->resync_nh_do_now)
return;
/* seen decrypted fragments since last fully-failed record */
if (ctx->resync_nh_reset) {
ctx->resync_nh_reset = 0;
ctx->resync_nh.decrypted_failed = 1;
ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
return;
}
if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
return;
/* doing resync, bump the next target in case it fails */
if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
ctx->resync_nh.decrypted_tgt *= 2;
else
ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
rxm = strp_msg(skb);
/* head of next rec is already in, parser will sync for us */
if (tcp_inq(sk) > rxm->full_len) {
trace_tls_device_rx_resync_nh_schedule(sk);
ctx->resync_nh_do_now = 1;
} else {
struct tls_prot_info *prot = &tls_ctx->prot_info;
u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
tls_bigint_increment(rcd_sn, prot->rec_seq_size);
tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
rcd_sn);
}
}
static int
tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
{
struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
const struct tls_cipher_desc *cipher_desc;
int err, offset, copy, data_len, pos;
struct sk_buff *skb, *skb_iter;
struct scatterlist sg[1];
struct strp_msg *rxm;
char *orig_buf, *buf;
switch (tls_ctx->crypto_recv.info.cipher_type) {
case TLS_CIPHER_AES_GCM_128:
case TLS_CIPHER_AES_GCM_256:
break;
default:
return -EINVAL;
}
cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
rxm = strp_msg(tls_strp_msg(sw_ctx));
orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
sk->sk_allocation);
if (!orig_buf)
return -ENOMEM;
buf = orig_buf;
err = tls_strp_msg_cow(sw_ctx);
if (unlikely(err))
goto free_buf;
skb = tls_strp_msg(sw_ctx);
rxm = strp_msg(skb);
offset = rxm->offset;
sg_init_table(sg, 1);
sg_set_buf(&sg[0], buf,
rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
if (err)
goto free_buf;
/* We are interested only in the decrypted data not the auth */
err = decrypt_skb(sk, sg);
if (err != -EBADMSG)
goto free_buf;
else
err = 0;
data_len = rxm->full_len - cipher_desc->tag;
if (skb_pagelen(skb) > offset) {
copy = min_t(int, skb_pagelen(skb) - offset, data_len);
if (skb->decrypted) {
err = skb_store_bits(skb, offset, buf, copy);
if (err)
goto free_buf;
}
offset += copy;
buf += copy;
}
pos = skb_pagelen(skb);
skb_walk_frags(skb, skb_iter) {
int frag_pos;
/* Practically all frags must belong to msg if reencrypt
* is needed with current strparser and coalescing logic,
* but strparser may "get optimized", so let's be safe.
*/
if (pos + skb_iter->len <= offset)
goto done_with_frag;
if (pos >= data_len + rxm->offset)
break;
frag_pos = offset - pos;
copy = min_t(int, skb_iter->len - frag_pos,
data_len + rxm->offset - offset);
if (skb_iter->decrypted) {
err = skb_store_bits(skb_iter, frag_pos, buf, copy);
if (err)
goto free_buf;
}
offset += copy;
buf += copy;
done_with_frag:
pos += skb_iter->len;
}
free_buf:
kfree(orig_buf);
return err;
}
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
struct sk_buff *skb = tls_strp_msg(sw_ctx);
struct strp_msg *rxm = strp_msg(skb);
int is_decrypted, is_encrypted;
if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
is_decrypted = skb->decrypted;
is_encrypted = !is_decrypted;
} else {
is_decrypted = 0;
is_encrypted = 0;
}
trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
tls_ctx->rx.rec_seq, rxm->full_len,
is_encrypted, is_decrypted);
if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
if (likely(is_encrypted || is_decrypted))
return is_decrypted;
/* After tls_device_down disables the offload, the next SKB will
* likely have initial fragments decrypted, and final ones not
* decrypted. We need to reencrypt that single SKB.
*/
return tls_device_reencrypt(sk, tls_ctx);
}
/* Return immediately if the record is either entirely plaintext or
* entirely ciphertext. Otherwise handle reencrypt partially decrypted
* record.
*/
if (is_decrypted) {
ctx->resync_nh_reset = 1;
return is_decrypted;
}
if (is_encrypted) {
tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
return 0;
}
ctx->resync_nh_reset = 1;
return tls_device_reencrypt(sk, tls_ctx);
}
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
struct net_device *netdev)
{
if (sk->sk_destruct != tls_device_sk_destruct) {
refcount_set(&ctx->refcount, 1);
dev_hold(netdev);
RCU_INIT_POINTER(ctx->netdev, netdev);
spin_lock_irq(&tls_device_lock);
list_add_tail(&ctx->list, &tls_device_list);
spin_unlock_irq(&tls_device_lock);
ctx->sk_destruct = sk->sk_destruct;
smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
}
}
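/* Enable TX device offload: validate the cipher (TLS 1.2 and an
 * offloadable AEAD only), set up the TX offload context and the SW
 * fallback, insert a zero-length "start marker" record at the current
 * write_seq and install the state in the driver via tls_dev_add().
 * On failure the caller falls back to SW crypto.
 */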
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
const struct tls_cipher_desc *cipher_desc;
struct tls_record_info *start_marker_record;
struct tls_offload_context_tx *offload_ctx;
struct tls_crypto_info *crypto_info;
struct net_device *netdev;
char *iv, *rec_seq;
struct sk_buff *skb;
__be64 rcd_sn;
int rc;
if (!ctx)
return -EINVAL;
if (ctx->priv_ctx_tx)
return -EEXIST;
netdev = get_netdev_for_sock(sk);
if (!netdev) {
pr_err_ratelimited("%s: netdev not found\n", __func__);
return -EINVAL;
}
if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
rc = -EOPNOTSUPP;
goto release_netdev;
}
crypto_info = &ctx->crypto_send.info;
if (crypto_info->version != TLS_1_2_VERSION) {
rc = -EOPNOTSUPP;
goto release_netdev;
}
cipher_desc = get_cipher_desc(crypto_info->cipher_type);
if (!cipher_desc || !cipher_desc->offloadable) {
rc = -EINVAL;
goto release_netdev;
}
iv = crypto_info_iv(crypto_info, cipher_desc);
rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
prot->version = crypto_info->version;
prot->cipher_type = crypto_info->cipher_type;
prot->prepend_size = TLS_HEADER_SIZE + cipher_desc->iv;
prot->tag_size = cipher_desc->tag;
prot->overhead_size = prot->prepend_size + prot->tag_size;
prot->iv_size = cipher_desc->iv;
prot->salt_size = cipher_desc->salt;
ctx->tx.iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
if (!ctx->tx.iv) {
rc = -ENOMEM;
goto release_netdev;
}
memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
prot->rec_seq_size = cipher_desc->rec_seq;
ctx->tx.rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
if (!ctx->tx.rec_seq) {
rc = -ENOMEM;
goto free_iv;
}
start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
if (!start_marker_record) {
rc = -ENOMEM;
goto free_rec_seq;
}
offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
if (!offload_ctx) {
rc = -ENOMEM;
goto free_marker_record;
}
rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
if (rc)
goto free_offload_ctx;
/* start at rec_seq - 1 to account for the start marker record */
memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;
start_marker_record->end_seq = tcp_sk(sk)->write_seq;
start_marker_record->len = 0;
start_marker_record->num_frags = 0;
INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
offload_ctx->ctx = ctx;
INIT_LIST_HEAD(&offload_ctx->records_list);
list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
spin_lock_init(&offload_ctx->lock);
sg_init_table(offload_ctx->sg_tx_data,
ARRAY_SIZE(offload_ctx->sg_tx_data));
clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
ctx->push_pending_record = tls_device_push_pending_record;
/* TLS offload is greatly simplified if we don't send
* SKBs where only part of the payload needs to be encrypted.
* So mark the last skb in the write queue as end of record.
*/
skb = tcp_write_queue_tail(sk);
if (skb)
TCP_SKB_CB(skb)->eor = 1;
	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler, thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
down_read(&device_offload_lock);
if (!(netdev->flags & IFF_UP)) {
rc = -EINVAL;
goto release_lock;
}
ctx->priv_ctx_tx = offload_ctx;
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
&ctx->crypto_send.info,
tcp_sk(sk)->write_seq);
trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
tcp_sk(sk)->write_seq, rec_seq, rc);
if (rc)
goto release_lock;
tls_device_attach(ctx, sk, netdev);
up_read(&device_offload_lock);
	/* Following this assignment, tls_is_skb_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
dev_put(netdev);
return 0;
release_lock:
up_read(&device_offload_lock);
clean_acked_data_disable(inet_csk(sk));
crypto_free_aead(offload_ctx->aead_send);
free_offload_ctx:
kfree(offload_ctx);
ctx->priv_ctx_tx = NULL;
free_marker_record:
kfree(start_marker_record);
free_rec_seq:
kfree(ctx->tx.rec_seq);
free_iv:
kfree(ctx->tx.iv);
release_netdev:
dev_put(netdev);
return rc;
}
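/* Enable RX device offload. The SW RX context is still set up via
 * tls_set_sw_offload() because the host must be able to decrypt records
 * the device could not handle; resync state lives in the RX offload
 * context allocated here.
 */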
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
struct tls12_crypto_info_aes_gcm_128 *info;
struct tls_offload_context_rx *context;
struct net_device *netdev;
int rc = 0;
if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
return -EOPNOTSUPP;
netdev = get_netdev_for_sock(sk);
if (!netdev) {
pr_err_ratelimited("%s: netdev not found\n", __func__);
return -EINVAL;
}
if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
rc = -EOPNOTSUPP;
goto release_netdev;
}
	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler, thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
down_read(&device_offload_lock);
if (!(netdev->flags & IFF_UP)) {
rc = -EINVAL;
goto release_lock;
}
context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
if (!context) {
rc = -ENOMEM;
goto release_lock;
}
context->resync_nh_reset = 1;
ctx->priv_ctx_rx = context;
rc = tls_set_sw_offload(sk, ctx, 0);
if (rc)
goto release_ctx;
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
&ctx->crypto_recv.info,
tcp_sk(sk)->copied_seq);
info = (void *)&ctx->crypto_recv.info;
trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
tcp_sk(sk)->copied_seq, info->rec_seq, rc);
if (rc)
goto free_sw_resources;
tls_device_attach(ctx, sk, netdev);
up_read(&device_offload_lock);
dev_put(netdev);
return 0;
free_sw_resources:
up_read(&device_offload_lock);
tls_sw_free_resources_rx(sk);
down_read(&device_offload_lock);
release_ctx:
ctx->priv_ctx_rx = NULL;
release_lock:
up_read(&device_offload_lock);
release_netdev:
dev_put(netdev);
return rc;
}
void tls_device_offload_cleanup_rx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct net_device *netdev;
down_read(&device_offload_lock);
netdev = rcu_dereference_protected(tls_ctx->netdev,
lockdep_is_held(&device_offload_lock));
if (!netdev)
goto out;
netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
TLS_OFFLOAD_CTX_DIR_RX);
if (tls_ctx->tx_conf != TLS_HW) {
dev_put(netdev);
rcu_assign_pointer(tls_ctx->netdev, NULL);
} else {
set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
}
out:
up_read(&device_offload_lock);
tls_sw_release_resources_rx(sk);
}
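/* NETDEV_DOWN handler: detach every offloaded context bound to this
 * netdev, switch its TX path to the SW fallback, stop RX/TX resync and
 * release the driver state. Contexts are moved to tls_device_down_list
 * so later cleanup knows offload is gone.
 */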
static int tls_device_down(struct net_device *netdev)
{
struct tls_context *ctx, *tmp;
unsigned long flags;
LIST_HEAD(list);
/* Request a write lock to block new offload attempts */
down_write(&device_offload_lock);
spin_lock_irqsave(&tls_device_lock, flags);
list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
struct net_device *ctx_netdev =
rcu_dereference_protected(ctx->netdev,
lockdep_is_held(&device_offload_lock));
if (ctx_netdev != netdev ||
!refcount_inc_not_zero(&ctx->refcount))
continue;
list_move(&ctx->list, &list);
}
spin_unlock_irqrestore(&tls_device_lock, flags);
list_for_each_entry_safe(ctx, tmp, &list, list) {
/* Stop offloaded TX and switch to the fallback.
* tls_is_skb_tx_device_offloaded will return false.
*/
WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
/* Stop the RX and TX resync.
* tls_dev_resync must not be called after tls_dev_del.
*/
rcu_assign_pointer(ctx->netdev, NULL);
/* Start skipping the RX resync logic completely. */
set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
/* Sync with inflight packets. After this point:
* TX: no non-encrypted packets will be passed to the driver.
* RX: resync requests from the driver will be ignored.
*/
synchronize_net();
/* Release the offload context on the driver side. */
if (ctx->tx_conf == TLS_HW)
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_TX);
if (ctx->rx_conf == TLS_HW &&
!test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_RX);
dev_put(netdev);
/* Move the context to a separate list for two reasons:
* 1. When the context is deallocated, list_del is called.
* 2. It's no longer an offloaded context, so we don't want to
* run offload-specific code on this context.
*/
spin_lock_irqsave(&tls_device_lock, flags);
list_move_tail(&ctx->list, &tls_device_down_list);
spin_unlock_irqrestore(&tls_device_lock, flags);
		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 * Now release the ref taken above.
		 */
if (refcount_dec_and_test(&ctx->refcount)) {
/* sk_destruct ran after tls_device_down took a ref, and
* it returned early. Complete the destruction here.
*/
list_del(&ctx->list);
tls_device_free_ctx(ctx);
}
}
up_write(&device_offload_lock);
flush_workqueue(destruct_wq);
return NOTIFY_DONE;
}
static int tls_dev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!dev->tlsdev_ops &&
!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
case NETDEV_FEAT_CHANGE:
if (netif_is_bond_master(dev))
return NOTIFY_DONE;
if ((dev->features & NETIF_F_HW_TLS_RX) &&
!dev->tlsdev_ops->tls_dev_resync)
return NOTIFY_BAD;
if (dev->tlsdev_ops &&
dev->tlsdev_ops->tls_dev_add &&
dev->tlsdev_ops->tls_dev_del)
return NOTIFY_DONE;
else
return NOTIFY_BAD;
case NETDEV_DOWN:
return tls_device_down(dev);
}
return NOTIFY_DONE;
}
static struct notifier_block tls_dev_notifier = {
.notifier_call = tls_dev_event,
};
int __init tls_device_init(void)
{
int err;
dummy_page = alloc_page(GFP_KERNEL);
if (!dummy_page)
return -ENOMEM;
destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
if (!destruct_wq) {
err = -ENOMEM;
goto err_free_dummy;
}
err = register_netdevice_notifier(&tls_dev_notifier);
if (err)
goto err_destroy_wq;
return 0;
err_destroy_wq:
destroy_workqueue(destruct_wq);
err_free_dummy:
put_page(dummy_page);
return err;
}
void __exit tls_device_cleanup(void)
{
unregister_netdevice_notifier(&tls_dev_notifier);
destroy_workqueue(destruct_wq);
clean_acked_data_flush();
put_page(dummy_page);
}
| linux-master | net/tls/tls_device.c |
/*
* Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
* Copyright (c) 2016-2017, Dave Watson <[email protected]>. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>
#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>
#include "tls.h"
MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");
enum {
TLSV4,
TLSV6,
TLS_NUM_PROTS,
};
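/* Compile-time checks that each cipher's sizes fit the limits assumed by
 * the TLS code and match the layout of the corresponding uapi
 * tls12_crypto_info_* structure.
 */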
#define CHECK_CIPHER_DESC(cipher,ci) \
static_assert(cipher ## _IV_SIZE <= MAX_IV_SIZE); \
static_assert(cipher ## _REC_SEQ_SIZE <= TLS_MAX_REC_SEQ_SIZE); \
static_assert(cipher ## _TAG_SIZE == TLS_TAG_SIZE); \
static_assert(sizeof_field(struct ci, iv) == cipher ## _IV_SIZE); \
static_assert(sizeof_field(struct ci, key) == cipher ## _KEY_SIZE); \
static_assert(sizeof_field(struct ci, salt) == cipher ## _SALT_SIZE); \
static_assert(sizeof_field(struct ci, rec_seq) == cipher ## _REC_SEQ_SIZE);
#define __CIPHER_DESC(ci) \
.iv_offset = offsetof(struct ci, iv), \
.key_offset = offsetof(struct ci, key), \
.salt_offset = offsetof(struct ci, salt), \
.rec_seq_offset = offsetof(struct ci, rec_seq), \
.crypto_info = sizeof(struct ci)
#define CIPHER_DESC(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
.nonce = cipher ## _IV_SIZE, \
.iv = cipher ## _IV_SIZE, \
.key = cipher ## _KEY_SIZE, \
.salt = cipher ## _SALT_SIZE, \
.tag = cipher ## _TAG_SIZE, \
.rec_seq = cipher ## _REC_SEQ_SIZE, \
.cipher_name = algname, \
.offloadable = _offloadable, \
__CIPHER_DESC(ci), \
}
#define CIPHER_DESC_NONCE0(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
.nonce = 0, \
.iv = cipher ## _IV_SIZE, \
.key = cipher ## _KEY_SIZE, \
.salt = cipher ## _SALT_SIZE, \
.tag = cipher ## _TAG_SIZE, \
.rec_seq = cipher ## _REC_SEQ_SIZE, \
.cipher_name = algname, \
.offloadable = _offloadable, \
__CIPHER_DESC(ci), \
}
const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128, "gcm(aes)", true),
CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256, "gcm(aes)", true),
CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128, "ccm(aes)", false),
CIPHER_DESC_NONCE0(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305, "rfc7539(chacha20,poly1305)", false),
CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm, "gcm(sm4)", false),
CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm, "ccm(sm4)", false),
CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128, "gcm(aria)", false),
CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256, "gcm(aria)", false),
};
CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256);
static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
const struct proto *base);
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
WRITE_ONCE(sk->sk_prot,
&tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
WRITE_ONCE(sk->sk_socket->ops,
&tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
int rc = 0;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
if (!*timeo) {
rc = -EAGAIN;
break;
}
if (signal_pending(current)) {
rc = sock_intr_errno(*timeo);
break;
}
if (sk_wait_event(sk, timeo,
!READ_ONCE(sk->sk_write_pending), &wait))
break;
}
remove_wait_queue(sk_sleep(sk), &wait);
return rc;
}
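/* Transmit an already-encrypted record described by @sg starting at
 * @first_offset, using MSG_SPLICE_PAGES so the pages are handed to TCP
 * without copying. On a partial send the remaining scatterlist is parked
 * in ctx->partially_sent_record for a later retry.
 */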
int tls_push_sg(struct sock *sk,
struct tls_context *ctx,
struct scatterlist *sg,
u16 first_offset,
int flags)
{
struct bio_vec bvec;
struct msghdr msg = {
.msg_flags = MSG_SPLICE_PAGES | flags,
};
int ret = 0;
struct page *p;
size_t size;
int offset = first_offset;
size = sg->length - offset;
offset += sg->offset;
ctx->splicing_pages = true;
while (1) {
/* is sending application-limited? */
tcp_rate_check_app_limited(sk);
p = sg_page(sg);
retry:
bvec_set_page(&bvec, p, size, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
ret = tcp_sendmsg_locked(sk, &msg, size);
if (ret != size) {
if (ret > 0) {
offset += ret;
size -= ret;
goto retry;
}
offset -= sg->offset;
ctx->partially_sent_offset = offset;
ctx->partially_sent_record = (void *)sg;
ctx->splicing_pages = false;
return ret;
}
put_page(p);
sk_mem_uncharge(sk, sg->length);
sg = sg_next(sg);
if (!sg)
break;
offset = sg->offset;
size = sg->length;
}
ctx->splicing_pages = false;
return 0;
}
static int tls_handle_open_record(struct sock *sk, int flags)
{
struct tls_context *ctx = tls_get_ctx(sk);
if (tls_is_pending_open_record(ctx))
return ctx->push_pending_record(sk, flags);
return 0;
}
int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type)
{
struct cmsghdr *cmsg;
int rc = -EINVAL;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_TLS)
continue;
switch (cmsg->cmsg_type) {
case TLS_SET_RECORD_TYPE:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
return -EINVAL;
if (msg->msg_flags & MSG_MORE)
return -EINVAL;
rc = tls_handle_open_record(sk, msg->msg_flags);
if (rc)
return rc;
*record_type = *(unsigned char *)CMSG_DATA(cmsg);
rc = 0;
break;
default:
return -EINVAL;
}
}
return rc;
}
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
int flags)
{
struct scatterlist *sg;
u16 offset;
sg = ctx->partially_sent_record;
offset = ctx->partially_sent_offset;
ctx->partially_sent_record = NULL;
return tls_push_sg(sk, ctx, sg, offset, flags);
}
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
struct scatterlist *sg;
for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
put_page(sg_page(sg));
sk_mem_uncharge(sk, sg->length);
}
ctx->partially_sent_record = NULL;
}
static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
	/* If splicing_pages is set, call the lower protocol write space
	 * handler to ensure we wake up any waiting operations there.
	 * For example, if splicing pages were to call sk_wait_event.
	 */
if (ctx->splicing_pages) {
ctx->sk_write_space(sk);
return;
}
#ifdef CONFIG_TLS_DEVICE
if (ctx->tx_conf == TLS_HW)
tls_device_write_space(sk, ctx);
else
#endif
tls_sw_write_space(sk, ctx);
ctx->sk_write_space(sk);
}
/**
 * tls_ctx_free() - free TLS ULP context
 * @sk: socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL, the caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
if (!ctx)
return;
memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
mutex_destroy(&ctx->tx_lock);
if (sk)
kfree_rcu(ctx, rcu);
else
kfree(ctx);
}
static void tls_sk_proto_cleanup(struct sock *sk,
struct tls_context *ctx, long timeo)
{
if (unlikely(sk->sk_write_pending) &&
!wait_on_pending_writer(sk, &timeo))
tls_handle_open_record(sk, 0);
/* We need these for tls_sw_fallback handling of other packets */
if (ctx->tx_conf == TLS_SW) {
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
tls_sw_release_resources_tx(sk);
TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
} else if (ctx->tx_conf == TLS_HW) {
tls_device_free_resources_tx(sk);
TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
}
if (ctx->rx_conf == TLS_SW) {
tls_sw_release_resources_rx(sk);
TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
} else if (ctx->rx_conf == TLS_HW) {
tls_device_offload_cleanup_rx(sk);
TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
}
}
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tls_context *ctx = tls_get_ctx(sk);
long timeo = sock_sndtimeo(sk, 0);
bool free_ctx;
if (ctx->tx_conf == TLS_SW)
tls_sw_cancel_work_tx(ctx);
lock_sock(sk);
free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;
if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
tls_sk_proto_cleanup(sk, ctx, timeo);
write_lock_bh(&sk->sk_callback_lock);
if (free_ctx)
rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
if (sk->sk_write_space == tls_write_space)
sk->sk_write_space = ctx->sk_write_space;
write_unlock_bh(&sk->sk_callback_lock);
release_sock(sk);
if (ctx->tx_conf == TLS_SW)
tls_sw_free_ctx_tx(ctx);
if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
tls_sw_strparser_done(ctx);
if (ctx->rx_conf == TLS_SW)
tls_sw_free_ctx_rx(ctx);
ctx->sk_proto->close(sk, timeout);
if (free_ctx)
tls_ctx_free(sk, ctx);
}
static __poll_t tls_sk_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait)
{
struct tls_sw_context_rx *ctx;
struct tls_context *tls_ctx;
struct sock *sk = sock->sk;
struct sk_psock *psock;
__poll_t mask = 0;
u8 shutdown;
int state;
mask = tcp_poll(file, sock, wait);
state = inet_sk_state_load(sk);
shutdown = READ_ONCE(sk->sk_shutdown);
if (unlikely(state != TCP_ESTABLISHED || shutdown & RCV_SHUTDOWN))
return mask;
tls_ctx = tls_get_ctx(sk);
ctx = tls_sw_ctx_rx(tls_ctx);
psock = sk_psock_get(sk);
if (skb_queue_empty_lockless(&ctx->rx_list) &&
!tls_strp_msg_ready(ctx) &&
sk_psock_queue_empty(psock))
mask &= ~(EPOLLIN | EPOLLRDNORM);
if (psock)
sk_psock_put(sk, psock);
return mask;
}
static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
int __user *optlen, int tx)
{
int rc = 0;
const struct tls_cipher_desc *cipher_desc;
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_crypto_info *crypto_info;
struct cipher_context *cctx;
int len;
if (get_user(len, optlen))
return -EFAULT;
if (!optval || (len < sizeof(*crypto_info))) {
rc = -EINVAL;
goto out;
}
if (!ctx) {
rc = -EBUSY;
goto out;
}
/* get user crypto info */
if (tx) {
crypto_info = &ctx->crypto_send.info;
cctx = &ctx->tx;
} else {
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
}
if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
rc = -EBUSY;
goto out;
}
if (len == sizeof(*crypto_info)) {
if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
rc = -EFAULT;
goto out;
}
cipher_desc = get_cipher_desc(crypto_info->cipher_type);
if (!cipher_desc || len != cipher_desc->crypto_info) {
rc = -EINVAL;
goto out;
}
memcpy(crypto_info_iv(crypto_info, cipher_desc),
cctx->iv + cipher_desc->salt, cipher_desc->iv);
memcpy(crypto_info_rec_seq(crypto_info, cipher_desc),
cctx->rec_seq, cipher_desc->rec_seq);
if (copy_to_user(optval, crypto_info, cipher_desc->crypto_info))
rc = -EFAULT;
out:
return rc;
}
static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval,
int __user *optlen)
{
struct tls_context *ctx = tls_get_ctx(sk);
unsigned int value;
int len;
if (get_user(len, optlen))
return -EFAULT;
if (len != sizeof(value))
return -EINVAL;
value = ctx->zerocopy_sendfile;
if (copy_to_user(optval, &value, sizeof(value)))
return -EFAULT;
return 0;
}
static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
int __user *optlen)
{
struct tls_context *ctx = tls_get_ctx(sk);
int value, len;
if (ctx->prot_info.version != TLS_1_3_VERSION)
return -EINVAL;
if (get_user(len, optlen))
return -EFAULT;
if (len < sizeof(value))
return -EINVAL;
value = -EINVAL;
if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
value = ctx->rx_no_pad;
if (value < 0)
return value;
if (put_user(sizeof(value), optlen))
return -EFAULT;
if (copy_to_user(optval, &value, sizeof(value)))
return -EFAULT;
return 0;
}
static int do_tls_getsockopt(struct sock *sk, int optname,
char __user *optval, int __user *optlen)
{
int rc = 0;
lock_sock(sk);
switch (optname) {
case TLS_TX:
case TLS_RX:
rc = do_tls_getsockopt_conf(sk, optval, optlen,
optname == TLS_TX);
break;
case TLS_TX_ZEROCOPY_RO:
rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
break;
case TLS_RX_EXPECT_NO_PAD:
rc = do_tls_getsockopt_no_pad(sk, optval, optlen);
break;
default:
rc = -ENOPROTOOPT;
break;
}
release_sock(sk);
return rc;
}
static int tls_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct tls_context *ctx = tls_get_ctx(sk);
if (level != SOL_TLS)
return ctx->sk_proto->getsockopt(sk, level,
optname, optval, optlen);
return do_tls_getsockopt(sk, optname, optval, optlen);
}
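/* Install crypto state for one direction (TLS_TX or TLS_RX). Device
 * offload is tried first; if the driver refuses, the SW implementation is
 * used instead, and the socket's proto/proto_ops are switched to the
 * matching [tx_conf][rx_conf] variant.
 */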
static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
unsigned int optlen, int tx)
{
struct tls_crypto_info *crypto_info;
struct tls_crypto_info *alt_crypto_info;
struct tls_context *ctx = tls_get_ctx(sk);
const struct tls_cipher_desc *cipher_desc;
int rc = 0;
int conf;
if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info)))
return -EINVAL;
if (tx) {
crypto_info = &ctx->crypto_send.info;
alt_crypto_info = &ctx->crypto_recv.info;
} else {
crypto_info = &ctx->crypto_recv.info;
alt_crypto_info = &ctx->crypto_send.info;
}
	/* Currently we don't support setting crypto info more than once */
if (TLS_CRYPTO_INFO_READY(crypto_info))
return -EBUSY;
rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
if (rc) {
rc = -EFAULT;
goto err_crypto_info;
}
/* check version */
if (crypto_info->version != TLS_1_2_VERSION &&
crypto_info->version != TLS_1_3_VERSION) {
rc = -EINVAL;
goto err_crypto_info;
}
/* Ensure that TLS version and ciphers are same in both directions */
if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
if (alt_crypto_info->version != crypto_info->version ||
alt_crypto_info->cipher_type != crypto_info->cipher_type) {
rc = -EINVAL;
goto err_crypto_info;
}
}
cipher_desc = get_cipher_desc(crypto_info->cipher_type);
if (!cipher_desc) {
rc = -EINVAL;
goto err_crypto_info;
}
switch (crypto_info->cipher_type) {
case TLS_CIPHER_ARIA_GCM_128:
case TLS_CIPHER_ARIA_GCM_256:
if (crypto_info->version != TLS_1_2_VERSION) {
rc = -EINVAL;
goto err_crypto_info;
}
break;
}
if (optlen != cipher_desc->crypto_info) {
rc = -EINVAL;
goto err_crypto_info;
}
rc = copy_from_sockptr_offset(crypto_info + 1, optval,
sizeof(*crypto_info),
optlen - sizeof(*crypto_info));
if (rc) {
rc = -EFAULT;
goto err_crypto_info;
}
if (tx) {
rc = tls_set_device_offload(sk, ctx);
conf = TLS_HW;
if (!rc) {
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
} else {
rc = tls_set_sw_offload(sk, ctx, 1);
if (rc)
goto err_crypto_info;
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
conf = TLS_SW;
}
} else {
rc = tls_set_device_offload_rx(sk, ctx);
conf = TLS_HW;
if (!rc) {
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
} else {
rc = tls_set_sw_offload(sk, ctx, 0);
if (rc)
goto err_crypto_info;
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
conf = TLS_SW;
}
tls_sw_strparser_arm(sk, ctx);
}
if (tx)
ctx->tx_conf = conf;
else
ctx->rx_conf = conf;
update_sk_prot(sk, ctx);
if (tx) {
ctx->sk_write_space = sk->sk_write_space;
sk->sk_write_space = tls_write_space;
} else {
struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(ctx);
tls_strp_check_rcv(&rx_ctx->strp);
}
return 0;
err_crypto_info:
memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
return rc;
}
static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval,
unsigned int optlen)
{
struct tls_context *ctx = tls_get_ctx(sk);
unsigned int value;
if (sockptr_is_null(optval) || optlen != sizeof(value))
return -EINVAL;
if (copy_from_sockptr(&value, optval, sizeof(value)))
return -EFAULT;
if (value > 1)
return -EINVAL;
ctx->zerocopy_sendfile = value;
return 0;
}
static int do_tls_setsockopt_no_pad(struct sock *sk, sockptr_t optval,
unsigned int optlen)
{
struct tls_context *ctx = tls_get_ctx(sk);
u32 val;
int rc;
if (ctx->prot_info.version != TLS_1_3_VERSION ||
sockptr_is_null(optval) || optlen < sizeof(val))
return -EINVAL;
rc = copy_from_sockptr(&val, optval, sizeof(val));
if (rc)
return -EFAULT;
if (val > 1)
return -EINVAL;
rc = check_zeroed_sockptr(optval, sizeof(val), optlen - sizeof(val));
if (rc < 1)
return rc == 0 ? -EINVAL : rc;
lock_sock(sk);
rc = -EINVAL;
if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) {
ctx->rx_no_pad = val;
tls_update_rx_zc_capable(ctx);
rc = 0;
}
release_sock(sk);
return rc;
}
static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen)
{
int rc = 0;
switch (optname) {
case TLS_TX:
case TLS_RX:
lock_sock(sk);
rc = do_tls_setsockopt_conf(sk, optval, optlen,
optname == TLS_TX);
release_sock(sk);
break;
case TLS_TX_ZEROCOPY_RO:
lock_sock(sk);
rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
release_sock(sk);
break;
case TLS_RX_EXPECT_NO_PAD:
rc = do_tls_setsockopt_no_pad(sk, optval, optlen);
break;
default:
rc = -ENOPROTOOPT;
break;
}
return rc;
}
static int tls_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct tls_context *ctx = tls_get_ctx(sk);
if (level != SOL_TLS)
return ctx->sk_proto->setsockopt(sk, level, optname, optval,
optlen);
return do_tls_setsockopt(sk, optname, optval, optlen);
}
struct tls_context *tls_ctx_create(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tls_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
return NULL;
mutex_init(&ctx->tx_lock);
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
ctx->sk_proto = READ_ONCE(sk->sk_prot);
ctx->sk = sk;
return ctx;
}
static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
const struct proto_ops *base)
{
ops[TLS_BASE][TLS_BASE] = *base;
ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
ops[TLS_SW ][TLS_BASE].splice_eof = tls_sw_splice_eof;
ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
ops[TLS_BASE][TLS_SW ].poll = tls_sk_poll;
ops[TLS_BASE][TLS_SW ].read_sock = tls_sw_read_sock;
ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE];
ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read;
ops[TLS_SW ][TLS_SW ].poll = tls_sk_poll;
ops[TLS_SW ][TLS_SW ].read_sock = tls_sw_read_sock;
#ifdef CONFIG_TLS_DEVICE
ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
#endif
#ifdef CONFIG_TLS_TOE
ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
#endif
}
static void tls_build_proto(struct sock *sk)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
struct proto *prot = READ_ONCE(sk->sk_prot);
	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
if (ip_ver == TLSV6 &&
unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
mutex_lock(&tcpv6_prot_mutex);
if (likely(prot != saved_tcpv6_prot)) {
build_protos(tls_prots[TLSV6], prot);
build_proto_ops(tls_proto_ops[TLSV6],
sk->sk_socket->ops);
smp_store_release(&saved_tcpv6_prot, prot);
}
mutex_unlock(&tcpv6_prot_mutex);
}
if (ip_ver == TLSV4 &&
unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
mutex_lock(&tcpv4_prot_mutex);
if (likely(prot != saved_tcpv4_prot)) {
build_protos(tls_prots[TLSV4], prot);
build_proto_ops(tls_proto_ops[TLSV4],
sk->sk_socket->ops);
smp_store_release(&saved_tcpv4_prot, prot);
}
mutex_unlock(&tcpv4_prot_mutex);
}
}
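/* Fill in the proto matrix indexed by [tx_conf][rx_conf]. Each
 * configuration inherits from TLS_BASE and overrides only the callbacks
 * its data path needs.
 */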
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
const struct proto *base)
{
prot[TLS_BASE][TLS_BASE] = *base;
prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;
prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof;
prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
#ifdef CONFIG_TLS_DEVICE
prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
prot[TLS_HW][TLS_BASE].splice_eof = tls_device_splice_eof;
prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
prot[TLS_HW][TLS_SW].splice_eof = tls_device_splice_eof;
prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];
prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];
prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash;
prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash;
#endif
}
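/* tls_init() runs when userspace attaches the "tls" ULP. A rough
 * userspace sketch of the expected sequence (illustrative only; the TLS
 * handshake and error handling are omitted):
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};	(key, iv, salt and rec_seq are filled from the handshake)
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 */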
static int tls_init(struct sock *sk)
{
struct tls_context *ctx;
int rc = 0;
tls_build_proto(sk);
#ifdef CONFIG_TLS_TOE
if (tls_toe_bypass(sk))
return 0;
#endif
	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* allocate tls context */
write_lock_bh(&sk->sk_callback_lock);
ctx = tls_ctx_create(sk);
if (!ctx) {
rc = -ENOMEM;
goto out;
}
ctx->tx_conf = TLS_BASE;
ctx->rx_conf = TLS_BASE;
update_sk_prot(sk, ctx);
out:
write_unlock_bh(&sk->sk_callback_lock);
return rc;
}
static void tls_update(struct sock *sk, struct proto *p,
void (*write_space)(struct sock *sk))
{
struct tls_context *ctx;
WARN_ON_ONCE(sk->sk_prot == p);
ctx = tls_get_ctx(sk);
if (likely(ctx)) {
ctx->sk_write_space = write_space;
ctx->sk_proto = p;
} else {
/* Pairs with lockless read in sk_clone_lock(). */
WRITE_ONCE(sk->sk_prot, p);
sk->sk_write_space = write_space;
}
}
static u16 tls_user_config(struct tls_context *ctx, bool tx)
{
u16 config = tx ? ctx->tx_conf : ctx->rx_conf;
switch (config) {
case TLS_BASE:
return TLS_CONF_BASE;
case TLS_SW:
return TLS_CONF_SW;
case TLS_HW:
return TLS_CONF_HW;
case TLS_HW_RECORD:
return TLS_CONF_HW_RECORD;
}
return 0;
}
static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
u16 version, cipher_type;
struct tls_context *ctx;
struct nlattr *start;
int err;
start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
if (!start)
return -EMSGSIZE;
rcu_read_lock();
ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
if (!ctx) {
err = 0;
goto nla_failure;
}
version = ctx->prot_info.version;
if (version) {
err = nla_put_u16(skb, TLS_INFO_VERSION, version);
if (err)
goto nla_failure;
}
cipher_type = ctx->prot_info.cipher_type;
if (cipher_type) {
err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
if (err)
goto nla_failure;
}
err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
if (err)
goto nla_failure;
err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
if (err)
goto nla_failure;
if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
if (err)
goto nla_failure;
}
if (ctx->rx_no_pad) {
err = nla_put_flag(skb, TLS_INFO_RX_NO_PAD);
if (err)
goto nla_failure;
}
rcu_read_unlock();
nla_nest_end(skb, start);
return 0;
nla_failure:
rcu_read_unlock();
nla_nest_cancel(skb, start);
return err;
}
static size_t tls_get_info_size(const struct sock *sk)
{
size_t size = 0;
size += nla_total_size(0) + /* INET_ULP_INFO_TLS */
nla_total_size(sizeof(u16)) + /* TLS_INFO_VERSION */
nla_total_size(sizeof(u16)) + /* TLS_INFO_CIPHER */
nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */
nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */
nla_total_size(0) + /* TLS_INFO_ZC_RO_TX */
nla_total_size(0) + /* TLS_INFO_RX_NO_PAD */
0;
return size;
}
static int __net_init tls_init_net(struct net *net)
{
int err;
net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
if (!net->mib.tls_statistics)
return -ENOMEM;
err = tls_proc_init(net);
if (err)
goto err_free_stats;
return 0;
err_free_stats:
free_percpu(net->mib.tls_statistics);
return err;
}
static void __net_exit tls_exit_net(struct net *net)
{
tls_proc_fini(net);
free_percpu(net->mib.tls_statistics);
}
static struct pernet_operations tls_proc_ops = {
.init = tls_init_net,
.exit = tls_exit_net,
};
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
.owner = THIS_MODULE,
.init = tls_init,
.update = tls_update,
.get_info = tls_get_info,
.get_info_size = tls_get_info_size,
};
static int __init tls_register(void)
{
int err;
err = register_pernet_subsys(&tls_proc_ops);
if (err)
return err;
err = tls_strp_dev_init();
if (err)
goto err_pernet;
err = tls_device_init();
if (err)
goto err_strp;
tcp_register_ulp(&tcp_tls_ulp_ops);
return 0;
err_strp:
tls_strp_dev_exit();
err_pernet:
unregister_pernet_subsys(&tls_proc_ops);
return err;
}
static void __exit tls_unregister(void)
{
tcp_unregister_ulp(&tcp_tls_ulp_ops);
tls_strp_dev_exit();
tls_device_cleanup();
unregister_pernet_subsys(&tls_proc_ops);
}
module_init(tls_register);
module_exit(tls_unregister);
| linux-master | net/tls/tls_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Tom Herbert <[email protected]> */
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/tls.h>
#include "tls.h"
static struct workqueue_struct *tls_strp_wq;
static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
if (strp->stopped)
return;
strp->stopped = 1;
/* Report an error on the lower socket */
WRITE_ONCE(strp->sk->sk_err, -err);
/* Paired with smp_rmb() in tcp_poll() */
smp_wmb();
sk_error_report(strp->sk);
}
static void tls_strp_anchor_free(struct tls_strparser *strp)
{
struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
if (!strp->copy_mode)
shinfo->frag_list = NULL;
consume_skb(strp->anchor);
strp->anchor = NULL;
}
static struct sk_buff *
tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
int offset, int len)
{
struct sk_buff *skb;
int i, err;
skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
&err, strp->sk->sk_allocation);
if (!skb)
return NULL;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
skb_frag_address(frag),
skb_frag_size(frag)));
offset += skb_frag_size(frag);
}
skb->len = len;
skb->data_len = len;
skb_copy_header(skb, in_skb);
return skb;
}
/* Create a new skb with the contents of input copied to its page frags */
static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
{
struct strp_msg *rxm;
struct sk_buff *skb;
skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
strp->stm.full_len);
if (!skb)
return NULL;
rxm = strp_msg(skb);
rxm->offset = 0;
return skb;
}
/* Steal the input skb, input msg is invalid after calling this function */
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{
struct tls_strparser *strp = &ctx->strp;
#ifdef CONFIG_TLS_DEVICE
DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
#else
	/* This function turns an input into an output,
	 * which can only happen if we have offload.
	 */
WARN_ON(1);
#endif
if (strp->copy_mode) {
struct sk_buff *skb;
/* Replace anchor with an empty skb, this is a little
* dangerous but __tls_cur_msg() warns on empty skbs
* so hopefully we'll catch abuses.
*/
skb = alloc_skb(0, strp->sk->sk_allocation);
if (!skb)
return NULL;
swap(strp->anchor, skb);
return skb;
}
return tls_strp_msg_make_copy(strp);
}
/* Force the input skb to be in copy mode. The data ownership remains
* with the input skb itself (meaning unpause will wipe it) but it can
* be modified.
*/
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
{
struct tls_strparser *strp = &ctx->strp;
struct sk_buff *skb;
if (strp->copy_mode)
return 0;
skb = tls_strp_msg_make_copy(strp);
if (!skb)
return -ENOMEM;
tls_strp_anchor_free(strp);
strp->anchor = skb;
tcp_read_done(strp->sk, strp->stm.full_len);
strp->copy_mode = 1;
return 0;
}
/* Make a clone (in the skb sense) of the input msg to keep a reference
* to the underlying data. The reference-holding skbs get placed on
* @dst.
*/
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{
struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
if (strp->copy_mode) {
struct sk_buff *skb;
WARN_ON_ONCE(!shinfo->nr_frags);
/* We can't skb_clone() the anchor, it gets wiped by unpause */
skb = alloc_skb(0, strp->sk->sk_allocation);
if (!skb)
return -ENOMEM;
__skb_queue_tail(dst, strp->anchor);
strp->anchor = skb;
} else {
struct sk_buff *iter, *clone;
int chunk, len, offset;
offset = strp->stm.offset;
len = strp->stm.full_len;
iter = shinfo->frag_list;
while (len > 0) {
if (iter->len <= offset) {
offset -= iter->len;
goto next;
}
chunk = iter->len - offset;
offset = 0;
clone = skb_clone(iter, strp->sk->sk_allocation);
if (!clone)
return -ENOMEM;
__skb_queue_tail(dst, clone);
len -= chunk;
next:
iter = iter->next;
}
}
return 0;
}
static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
{
struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
int i;
DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
for (i = 0; i < shinfo->nr_frags; i++)
__skb_frag_unref(&shinfo->frags[i], false);
shinfo->nr_frags = 0;
if (strp->copy_mode) {
kfree_skb_list(shinfo->frag_list);
shinfo->frag_list = NULL;
}
strp->copy_mode = 0;
strp->mixed_decrypted = 0;
}
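/* Copy-mode receive into the anchor's page frags: first accumulate enough
 * bytes to parse the record header (tls_rx_msg_size()), trim any
 * over-read, then keep copying until the full record length is reached.
 */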
static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
struct sk_buff *in_skb, unsigned int offset,
size_t in_len)
{
size_t len, chunk;
skb_frag_t *frag;
int sz;
frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
len = in_len;
/* First make sure we got the header */
if (!strp->stm.full_len) {
/* Assume one page is more than enough for headers */
chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
skb_frag_address(frag) +
skb_frag_size(frag),
chunk));
skb->len += chunk;
skb->data_len += chunk;
skb_frag_size_add(frag, chunk);
sz = tls_rx_msg_size(strp, skb);
if (sz < 0)
return sz;
		/* We may have over-read; sz == 0 is guaranteed to be an under-read */
if (unlikely(sz && sz < skb->len)) {
int over = skb->len - sz;
WARN_ON_ONCE(over > chunk);
skb->len -= over;
skb->data_len -= over;
skb_frag_size_add(frag, -over);
chunk -= over;
}
frag++;
len -= chunk;
offset += chunk;
strp->stm.full_len = sz;
if (!strp->stm.full_len)
goto read_done;
}
/* Load up more data */
while (len && strp->stm.full_len > skb->len) {
chunk = min_t(size_t, len, strp->stm.full_len - skb->len);
chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
skb_frag_address(frag) +
skb_frag_size(frag),
chunk));
skb->len += chunk;
skb->data_len += chunk;
skb_frag_size_add(frag, chunk);
frag++;
len -= chunk;
offset += chunk;
}
read_done:
return in_len - len;
}
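/* Copy-mode receive for records with mixed decrypted/encrypted data: each
 * chunk is copied into its own skb and chained onto the anchor's
 * frag_list so the per-skb decrypted state is preserved.
 */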
static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
struct sk_buff *in_skb, unsigned int offset,
size_t in_len)
{
struct sk_buff *nskb, *first, *last;
struct skb_shared_info *shinfo;
size_t chunk;
int sz;
if (strp->stm.full_len)
chunk = strp->stm.full_len - skb->len;
else
chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
chunk = min(chunk, in_len);
nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
if (!nskb)
return -ENOMEM;
shinfo = skb_shinfo(skb);
if (!shinfo->frag_list) {
shinfo->frag_list = nskb;
nskb->prev = nskb;
} else {
first = shinfo->frag_list;
last = first->prev;
last->next = nskb;
first->prev = nskb;
}
skb->len += chunk;
skb->data_len += chunk;
if (!strp->stm.full_len) {
sz = tls_rx_msg_size(strp, skb);
if (sz < 0)
return sz;
		/* We may have over-read; sz == 0 is guaranteed to be an under-read */
if (unlikely(sz && sz < skb->len)) {
int over = skb->len - sz;
WARN_ON_ONCE(over > chunk);
skb->len -= over;
skb->data_len -= over;
__pskb_trim(nskb, nskb->len - over);
chunk -= over;
}
strp->stm.full_len = sz;
}
return chunk;
}
static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
unsigned int offset, size_t in_len)
{
struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
struct sk_buff *skb;
int ret;
if (strp->msg_ready)
return 0;
skb = strp->anchor;
if (!skb->len)
skb_copy_decrypted(skb, in_skb);
else
strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);
if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
else
ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
if (ret < 0) {
desc->error = ret;
ret = 0;
}
if (strp->stm.full_len && strp->stm.full_len == skb->len) {
desc->count = 0;
strp->msg_ready = 1;
tls_rx_msg_ready(strp);
}
return ret;
}
static int tls_strp_read_copyin(struct tls_strparser *strp)
{
read_descriptor_t desc;
desc.arg.data = strp;
desc.error = 0;
desc.count = 1; /* give more than one skb per call */
/* sk should be locked here, so okay to do read_sock */
tcp_read_sock(strp->sk, &desc, tls_strp_copyin);
return desc.error;
}
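/* Switch to copy mode: pre-allocate page frags on the anchor, large
 * enough for the record (or for the maximum record size if the length is
 * not yet known), then pull the data out of TCP via tls_strp_read_copyin().
 */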
static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
{
struct skb_shared_info *shinfo;
struct page *page;
int need_spc, len;
	/* If the rbuf is small or the rcv window has collapsed to 0 we need
	 * to read the data out. Otherwise the connection will stall.
	 * Without pressure, a threshold of INT_MAX will never be ready.
	 */
if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
return 0;
shinfo = skb_shinfo(strp->anchor);
shinfo->frag_list = NULL;
	/* If we don't know the length, go for max plus a page for cipher overhead */
need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
for (len = need_spc; len > 0; len -= PAGE_SIZE) {
page = alloc_page(strp->sk->sk_allocation);
if (!page) {
tls_strp_flush_anchor_copy(strp);
return -ENOMEM;
}
skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
page, 0, 0);
}
strp->copy_mode = 1;
strp->stm.offset = 0;
strp->anchor->len = 0;
strp->anchor->data_len = 0;
strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);
tls_strp_read_copyin(strp);
return 0;
}
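/* Zero-copy mode sanity check: walk the TCP skbs backing the record and
 * make sure the sequence numbers are contiguous and the decrypted state
 * matches across all of them; otherwise the caller falls back to copying.
 */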
static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
{
unsigned int len = strp->stm.offset + strp->stm.full_len;
struct sk_buff *first, *skb;
u32 seq;
first = skb_shinfo(strp->anchor)->frag_list;
skb = first;
seq = TCP_SKB_CB(first)->seq;
/* Make sure there's no duplicate data in the queue,
* and the decrypted status matches.
*/
while (skb->len < len) {
seq += skb->len;
len -= skb->len;
skb = skb->next;
if (TCP_SKB_CB(skb)->seq != seq)
return false;
if (skb_cmp_decrypted(first, skb))
return false;
}
return true;
}
static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
struct tcp_sock *tp = tcp_sk(strp->sk);
struct sk_buff *first;
u32 offset;
first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
if (WARN_ON_ONCE(!first))
return;
/* Bestow the state onto the anchor */
strp->anchor->len = offset + len;
strp->anchor->data_len = offset + len;
strp->anchor->truesize = offset + len;
skb_shinfo(strp->anchor)->frag_list = first;
skb_copy_header(strp->anchor, first);
strp->anchor->destructor = NULL;
strp->stm.offset = offset;
}
void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
struct strp_msg *rxm;
struct tls_msg *tlm;
DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);
if (!strp->copy_mode && force_refresh) {
if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
return;
tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
}
rxm = strp_msg(strp->anchor);
rxm->full_len = strp->stm.full_len;
rxm->offset = strp->stm.offset;
tlm = tls_msg(strp->anchor);
tlm->control = strp->mark;
}
/* Called with lock held on lower socket */
static int tls_strp_read_sock(struct tls_strparser *strp)
{
int sz, inq;
inq = tcp_inq(strp->sk);
if (inq < 1)
return 0;
if (unlikely(strp->copy_mode))
return tls_strp_read_copyin(strp);
if (inq < strp->stm.full_len)
return tls_strp_read_copy(strp, true);
if (!strp->stm.full_len) {
tls_strp_load_anchor_with_queue(strp, inq);
sz = tls_rx_msg_size(strp, strp->anchor);
if (sz < 0) {
tls_strp_abort_strp(strp, sz);
return sz;
}
strp->stm.full_len = sz;
if (!strp->stm.full_len || inq < strp->stm.full_len)
return tls_strp_read_copy(strp, true);
}
if (!tls_strp_check_queue_ok(strp))
return tls_strp_read_copy(strp, false);
strp->msg_ready = 1;
tls_rx_msg_ready(strp);
return 0;
}
void tls_strp_check_rcv(struct tls_strparser *strp)
{
if (unlikely(strp->stopped) || strp->msg_ready)
return;
if (tls_strp_read_sock(strp) == -ENOMEM)
queue_work(tls_strp_wq, &strp->work);
}
/* Lower sock lock held */
void tls_strp_data_ready(struct tls_strparser *strp)
{
/* This check is needed to synchronize with do_tls_strp_work.
* do_tls_strp_work acquires a process lock (lock_sock) whereas
* the lock held here is bh_lock_sock. The two locks can be
* held by different threads at the same time, but bh_lock_sock
* allows a thread in BH context to safely check if the process
* lock is held. In this case, if the lock is held, queue work.
*/
if (sock_owned_by_user_nocheck(strp->sk)) {
queue_work(tls_strp_wq, &strp->work);
return;
}
tls_strp_check_rcv(strp);
}
static void tls_strp_work(struct work_struct *w)
{
struct tls_strparser *strp =
container_of(w, struct tls_strparser, work);
lock_sock(strp->sk);
tls_strp_check_rcv(strp);
release_sock(strp->sk);
}
void tls_strp_msg_done(struct tls_strparser *strp)
{
WARN_ON(!strp->stm.full_len);
if (likely(!strp->copy_mode))
tcp_read_done(strp->sk, strp->stm.full_len);
else
tls_strp_flush_anchor_copy(strp);
strp->msg_ready = 0;
memset(&strp->stm, 0, sizeof(strp->stm));
tls_strp_check_rcv(strp);
}
void tls_strp_stop(struct tls_strparser *strp)
{
strp->stopped = 1;
}
int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
{
memset(strp, 0, sizeof(*strp));
strp->sk = sk;
strp->anchor = alloc_skb(0, GFP_KERNEL);
if (!strp->anchor)
return -ENOMEM;
INIT_WORK(&strp->work, tls_strp_work);
return 0;
}
/* strp must already be stopped so that the strparser receive path will no
 * longer be invoked. Note that tls_strp_done is not called with the lower
 * socket held.
 */
void tls_strp_done(struct tls_strparser *strp)
{
WARN_ON(!strp->stopped);
cancel_work_sync(&strp->work);
tls_strp_anchor_free(strp);
}
int __init tls_strp_dev_init(void)
{
tls_strp_wq = create_workqueue("tls-strp");
if (unlikely(!tls_strp_wq))
return -ENOMEM;
return 0;
}
void tls_strp_dev_exit(void)
{
destroy_workqueue(tls_strp_wq);
}
| linux-master | net/tls/tls_strp.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/snmp.h>
#include <net/tls.h>
#include "tls.h"
#ifdef CONFIG_PROC_FS
static const struct snmp_mib tls_mib_list[] = {
SNMP_MIB_ITEM("TlsCurrTxSw", LINUX_MIB_TLSCURRTXSW),
SNMP_MIB_ITEM("TlsCurrRxSw", LINUX_MIB_TLSCURRRXSW),
SNMP_MIB_ITEM("TlsCurrTxDevice", LINUX_MIB_TLSCURRTXDEVICE),
SNMP_MIB_ITEM("TlsCurrRxDevice", LINUX_MIB_TLSCURRRXDEVICE),
SNMP_MIB_ITEM("TlsTxSw", LINUX_MIB_TLSTXSW),
SNMP_MIB_ITEM("TlsRxSw", LINUX_MIB_TLSRXSW),
SNMP_MIB_ITEM("TlsTxDevice", LINUX_MIB_TLSTXDEVICE),
SNMP_MIB_ITEM("TlsRxDevice", LINUX_MIB_TLSRXDEVICE),
SNMP_MIB_ITEM("TlsDecryptError", LINUX_MIB_TLSDECRYPTERROR),
SNMP_MIB_ITEM("TlsRxDeviceResync", LINUX_MIB_TLSRXDEVICERESYNC),
SNMP_MIB_ITEM("TlsDecryptRetry", LINUX_MIB_TLSDECRYPTRETRY),
SNMP_MIB_ITEM("TlsRxNoPadViolation", LINUX_MIB_TLSRXNOPADVIOL),
SNMP_MIB_SENTINEL
};
static int tls_statistics_seq_show(struct seq_file *seq, void *v)
{
unsigned long buf[LINUX_MIB_TLSMAX] = {};
struct net *net = seq->private;
int i;
snmp_get_cpu_field_batch(buf, tls_mib_list, net->mib.tls_statistics);
for (i = 0; tls_mib_list[i].name; i++)
seq_printf(seq, "%-32s\t%lu\n", tls_mib_list[i].name, buf[i]);
return 0;
}
#endif
int __net_init tls_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
if (!proc_create_net_single("tls_stat", 0444, net->proc_net,
tls_statistics_seq_show, NULL))
return -ENOMEM;
#endif /* CONFIG_PROC_FS */
return 0;
}
void __net_exit tls_proc_fini(struct net *net)
{
remove_proc_entry("tls_stat", net->proc_net);
}
| linux-master | net/tls/tls_proc.c |
/*
* Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
* Copyright (c) 2016-2017, Dave Watson <[email protected]>. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <net/inet_connection_sock.h>
#include <net/tls.h>
#include <net/tls_toe.h>
#include "tls.h"
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static void tls_toe_sk_destruct(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tls_context *ctx = tls_get_ctx(sk);
ctx->sk_destruct(sk);
/* Free ctx */
rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
tls_ctx_free(sk, ctx);
}
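/* Give registered TOE devices a chance to take over the socket: if any
 * device reports the feature, create a TLS context in TLS_HW_RECORD mode
 * and return 1 so the regular ULP setup is skipped.
 */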
int tls_toe_bypass(struct sock *sk)
{
struct tls_toe_device *dev;
struct tls_context *ctx;
int rc = 0;
spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
if (dev->feature && dev->feature(dev)) {
ctx = tls_ctx_create(sk);
if (!ctx)
goto out;
ctx->sk_destruct = sk->sk_destruct;
sk->sk_destruct = tls_toe_sk_destruct;
ctx->rx_conf = TLS_HW_RECORD;
ctx->tx_conf = TLS_HW_RECORD;
update_sk_prot(sk, ctx);
rc = 1;
break;
}
}
out:
spin_unlock_bh(&device_spinlock);
return rc;
}
void tls_toe_unhash(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_toe_device *dev;
spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
if (dev->unhash) {
kref_get(&dev->kref);
spin_unlock_bh(&device_spinlock);
dev->unhash(dev, sk);
kref_put(&dev->kref, dev->release);
spin_lock_bh(&device_spinlock);
}
}
spin_unlock_bh(&device_spinlock);
ctx->sk_proto->unhash(sk);
}
int tls_toe_hash(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_toe_device *dev;
int err;
err = ctx->sk_proto->hash(sk);
spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
if (dev->hash) {
kref_get(&dev->kref);
spin_unlock_bh(&device_spinlock);
err |= dev->hash(dev, sk);
kref_put(&dev->kref, dev->release);
spin_lock_bh(&device_spinlock);
}
}
spin_unlock_bh(&device_spinlock);
if (err)
tls_toe_unhash(sk);
return err;
}
void tls_toe_register_device(struct tls_toe_device *device)
{
spin_lock_bh(&device_spinlock);
list_add_tail(&device->dev_list, &device_list);
spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_toe_register_device);
void tls_toe_unregister_device(struct tls_toe_device *device)
{
spin_lock_bh(&device_spinlock);
list_del(&device->dev_list);
spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_toe_unregister_device);
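/*
 * Illustrative sketch only: roughly how a TOE-capable driver could hook into
 * the registration API above.  It is kept under "#if 0" because the callback
 * prototypes are inferred from the call sites in this file (->feature(),
 * ->hash(), ->unhash(), ->release() on the kref) rather than taken from the
 * tls_toe_device definition itself, so treat it as an assumption.
 */
#if 0
static int example_toe_feature(struct tls_toe_device *dev)
{
	/* non-zero means the NIC is willing to take over this connection */
	return 1;
}

static void example_toe_release(struct kref *kref)
{
	/* last reference dropped; free driver-private state here */
}

static struct tls_toe_device example_toe_dev = {
	.feature = example_toe_feature,
	.release = example_toe_release,
	/* .hash/.unhash would mirror the driver's connection tracking */
};

/* A driver would kref_init(&example_toe_dev.kref), then call
 * tls_toe_register_device(&example_toe_dev) from its init path and
 * tls_toe_unregister_device() on teardown.
 */
#endif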
| linux-master | net/tls/tls_toe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* XFRM virtual interface
*
* Copyright (C) 2018 secunet Security Networks AG
*
* Author:
* Steffen Klassert <[email protected]>
*/
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <net/gso.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/dst_metadata.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>
static int xfrmi_dev_init(struct net_device *dev);
static void xfrmi_dev_setup(struct net_device *dev);
static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
static unsigned int xfrmi_net_id __read_mostly;
static const struct net_device_ops xfrmi_netdev_ops;
#define XFRMI_HASH_BITS 8
#define XFRMI_HASH_SIZE BIT(XFRMI_HASH_BITS)
struct xfrmi_net {
/* lists for storing interfaces in use */
struct xfrm_if __rcu *xfrmi[XFRMI_HASH_SIZE];
struct xfrm_if __rcu *collect_md_xfrmi;
};
static const struct nla_policy xfrm_lwt_policy[LWT_XFRM_MAX + 1] = {
[LWT_XFRM_IF_ID] = NLA_POLICY_MIN(NLA_U32, 1),
[LWT_XFRM_LINK] = NLA_POLICY_MIN(NLA_U32, 1),
};
static void xfrmi_destroy_state(struct lwtunnel_state *lwt)
{
}
static int xfrmi_build_state(struct net *net, struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[LWT_XFRM_MAX + 1];
struct lwtunnel_state *new_state;
struct xfrm_md_info *info;
int ret;
ret = nla_parse_nested(tb, LWT_XFRM_MAX, nla, xfrm_lwt_policy, extack);
if (ret < 0)
return ret;
if (!tb[LWT_XFRM_IF_ID]) {
NL_SET_ERR_MSG(extack, "if_id must be set");
return -EINVAL;
}
new_state = lwtunnel_state_alloc(sizeof(*info));
if (!new_state) {
NL_SET_ERR_MSG(extack, "failed to create encap info");
return -ENOMEM;
}
new_state->type = LWTUNNEL_ENCAP_XFRM;
info = lwt_xfrm_info(new_state);
info->if_id = nla_get_u32(tb[LWT_XFRM_IF_ID]);
if (tb[LWT_XFRM_LINK])
info->link = nla_get_u32(tb[LWT_XFRM_LINK]);
*ts = new_state;
return 0;
}
static int xfrmi_fill_encap_info(struct sk_buff *skb,
struct lwtunnel_state *lwt)
{
struct xfrm_md_info *info = lwt_xfrm_info(lwt);
if (nla_put_u32(skb, LWT_XFRM_IF_ID, info->if_id) ||
(info->link && nla_put_u32(skb, LWT_XFRM_LINK, info->link)))
return -EMSGSIZE;
return 0;
}
static int xfrmi_encap_nlsize(struct lwtunnel_state *lwtstate)
{
return nla_total_size(sizeof(u32)) + /* LWT_XFRM_IF_ID */
nla_total_size(sizeof(u32)); /* LWT_XFRM_LINK */
}
static int xfrmi_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
struct xfrm_md_info *a_info = lwt_xfrm_info(a);
struct xfrm_md_info *b_info = lwt_xfrm_info(b);
return memcmp(a_info, b_info, sizeof(*a_info));
}
static const struct lwtunnel_encap_ops xfrmi_encap_ops = {
.build_state = xfrmi_build_state,
.destroy_state = xfrmi_destroy_state,
.fill_encap = xfrmi_fill_encap_info,
.get_encap_size = xfrmi_encap_nlsize,
.cmp_encap = xfrmi_encap_cmp,
.owner = THIS_MODULE,
};
#define for_each_xfrmi_rcu(start, xi) \
for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
static u32 xfrmi_hash(u32 if_id)
{
return hash_32(if_id, XFRMI_HASH_BITS);
}
static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
{
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
struct xfrm_if *xi;
for_each_xfrmi_rcu(xfrmn->xfrmi[xfrmi_hash(x->if_id)], xi) {
if (x->if_id == xi->p.if_id &&
(xi->dev->flags & IFF_UP))
return xi;
}
xi = rcu_dereference(xfrmn->collect_md_xfrmi);
if (xi && (xi->dev->flags & IFF_UP))
return xi;
return NULL;
}
static bool xfrmi_decode_session(struct sk_buff *skb,
unsigned short family,
struct xfrm_if_decode_session_result *res)
{
struct net_device *dev;
struct xfrm_if *xi;
int ifindex = 0;
if (!secpath_exists(skb) || !skb->dev)
return false;
switch (family) {
case AF_INET6:
ifindex = inet6_sdif(skb);
break;
case AF_INET:
ifindex = inet_sdif(skb);
break;
}
if (ifindex) {
struct net *net = xs_net(xfrm_input_state(skb));
dev = dev_get_by_index_rcu(net, ifindex);
} else {
dev = skb->dev;
}
if (!dev || !(dev->flags & IFF_UP))
return false;
if (dev->netdev_ops != &xfrmi_netdev_ops)
return false;
xi = netdev_priv(dev);
res->net = xi->net;
if (xi->p.collect_md)
res->if_id = xfrm_input_state(skb)->if_id;
else
res->if_id = xi->p.if_id;
return true;
}
static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
struct xfrm_if __rcu **xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)];
	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
rcu_assign_pointer(*xip, xi);
}
static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
struct xfrm_if __rcu **xip;
struct xfrm_if *iter;
for (xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)];
(iter = rtnl_dereference(*xip)) != NULL;
xip = &iter->next) {
if (xi == iter) {
rcu_assign_pointer(*xip, xi->next);
break;
}
}
}
static void xfrmi_dev_free(struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
gro_cells_destroy(&xi->gro_cells);
free_percpu(dev->tstats);
}
static int xfrmi_create(struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net *net = dev_net(dev);
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
int err;
dev->rtnl_link_ops = &xfrmi_link_ops;
err = register_netdevice(dev);
if (err < 0)
goto out;
if (xi->p.collect_md)
rcu_assign_pointer(xfrmn->collect_md_xfrmi, xi);
else
xfrmi_link(xfrmn, xi);
return 0;
out:
return err;
}
static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
{
struct xfrm_if __rcu **xip;
struct xfrm_if *xi;
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
for (xip = &xfrmn->xfrmi[xfrmi_hash(p->if_id)];
(xi = rtnl_dereference(*xip)) != NULL;
xip = &xi->next)
if (xi->p.if_id == p->if_id)
return xi;
return NULL;
}
static void xfrmi_dev_uninit(struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
if (xi->p.collect_md)
RCU_INIT_POINTER(xfrmn->collect_md_xfrmi, NULL);
else
xfrmi_unlink(xfrmn, xi);
}
static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
{
skb_clear_tstamp(skb);
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
skb->ignore_df = 0;
skb_dst_drop(skb);
nf_reset_ct(skb);
nf_reset_trace(skb);
if (!xnet)
return;
ipvs_reset(skb);
secpath_reset(skb);
skb_orphan(skb);
skb->mark = 0;
}
static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type, unsigned short family)
{
struct sec_path *sp;
sp = skb_sec_path(skb);
if (sp && (sp->len || sp->olen) &&
!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
goto discard;
XFRM_SPI_SKB_CB(skb)->family = family;
if (family == AF_INET) {
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
} else {
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
}
return xfrm_input(skb, nexthdr, spi, encap_type);
discard:
kfree_skb(skb);
return 0;
}
static int xfrmi4_rcv(struct sk_buff *skb)
{
return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
}
static int xfrmi6_rcv(struct sk_buff *skb)
{
return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
0, 0, AF_INET6);
}
static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
}
static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
}
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
const struct xfrm_mode *inner_mode;
struct net_device *dev;
struct xfrm_state *x;
struct xfrm_if *xi;
bool xnet;
int link;
if (err && !secpath_exists(skb))
return 0;
x = xfrm_input_state(skb);
xi = xfrmi_lookup(xs_net(x), x);
if (!xi)
return 1;
link = skb->dev->ifindex;
dev = xi->dev;
skb->dev = dev;
if (err) {
dev->stats.rx_errors++;
dev->stats.rx_dropped++;
return 0;
}
xnet = !net_eq(xi->net, dev_net(skb->dev));
if (xnet) {
inner_mode = &x->inner_mode;
if (x->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
if (inner_mode == NULL) {
XFRM_INC_STATS(dev_net(skb->dev),
LINUX_MIB_XFRMINSTATEMODEERROR);
return -EINVAL;
}
}
if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
inner_mode->family))
return -EPERM;
}
xfrmi_scrub_packet(skb, xnet);
if (xi->p.collect_md) {
struct metadata_dst *md_dst;
md_dst = metadata_dst_alloc(0, METADATA_XFRM, GFP_ATOMIC);
if (!md_dst)
return -ENOMEM;
md_dst->u.xfrm_info.if_id = x->if_id;
md_dst->u.xfrm_info.link = link;
skb_dst_set(skb, (struct dst_entry *)md_dst);
}
dev_sw_netstats_rx_add(dev, skb->len);
return 0;
}
static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
unsigned int length = skb->len;
struct net_device *tdev;
struct xfrm_state *x;
int err = -1;
u32 if_id;
int mtu;
if (xi->p.collect_md) {
struct xfrm_md_info *md_info = skb_xfrm_md_info(skb);
if (unlikely(!md_info))
return -EINVAL;
if_id = md_info->if_id;
fl->flowi_oif = md_info->link;
if (md_info->dst_orig) {
struct dst_entry *tmp_dst = dst;
dst = md_info->dst_orig;
skb_dst_set(skb, dst);
md_info->dst_orig = NULL;
dst_release(tmp_dst);
}
} else {
if_id = xi->p.if_id;
}
dst_hold(dst);
dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, if_id);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto tx_err_link_failure;
}
x = dst->xfrm;
if (!x)
goto tx_err_link_failure;
if (x->if_id != if_id)
goto tx_err_link_failure;
tdev = dst->dev;
if (tdev == dev) {
stats->collisions++;
net_warn_ratelimited("%s: Local routing loop detected!\n",
dev->name);
goto tx_err_dst_release;
}
mtu = dst_mtu(dst);
if ((!skb_is_gso(skb) && skb->len > mtu) ||
(skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (skb->len > 1280)
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
else
goto xmit;
} else {
if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
goto xmit;
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
}
dst_release(dst);
return -EMSGSIZE;
}
xmit:
xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
skb_dst_set(skb, dst);
skb->dev = tdev;
err = dst_output(xi->net, skb->sk, skb);
if (net_xmit_eval(err) == 0) {
dev_sw_netstats_tx_add(dev, 1, length);
} else {
stats->tx_errors++;
stats->tx_aborted_errors++;
}
return 0;
tx_err_link_failure:
stats->tx_carrier_errors++;
dst_link_failure(skb);
tx_err_dst_release:
dst_release(dst);
return err;
}
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
struct flowi fl;
int ret;
memset(&fl, 0, sizeof(fl));
switch (skb->protocol) {
case htons(ETH_P_IPV6):
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
xfrm_decode_session(skb, &fl, AF_INET6);
if (!dst) {
fl.u.ip6.flowi6_oif = dev->ifindex;
fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
if (dst->error) {
dst_release(dst);
stats->tx_carrier_errors++;
goto tx_err;
}
skb_dst_set(skb, dst);
}
break;
case htons(ETH_P_IP):
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
xfrm_decode_session(skb, &fl, AF_INET);
if (!dst) {
struct rtable *rt;
fl.u.ip4.flowi4_oif = dev->ifindex;
fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
if (IS_ERR(rt)) {
stats->tx_carrier_errors++;
goto tx_err;
}
skb_dst_set(skb, &rt->dst);
}
break;
default:
goto tx_err;
}
fl.flowi_oif = xi->p.link;
ret = xfrmi_xmit2(skb, dev, &fl);
if (ret < 0)
goto tx_err;
return NETDEV_TX_OK;
tx_err:
stats->tx_errors++;
stats->tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
static int xfrmi4_err(struct sk_buff *skb, u32 info)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct net *net = dev_net(skb->dev);
int protocol = iph->protocol;
struct ip_comp_hdr *ipch;
struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
struct xfrm_state *x;
struct xfrm_if *xi;
__be32 spi;
switch (protocol) {
case IPPROTO_ESP:
esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
spi = esph->spi;
break;
case IPPROTO_AH:
ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
spi = ah->spi;
break;
case IPPROTO_COMP:
ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
spi = htonl(ntohs(ipch->cpi));
break;
default:
return 0;
}
switch (icmp_hdr(skb)->type) {
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return 0;
break;
case ICMP_REDIRECT:
break;
default:
return 0;
}
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
spi, protocol, AF_INET);
if (!x)
return 0;
xi = xfrmi_lookup(net, x);
if (!xi) {
xfrm_state_put(x);
return -1;
}
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
ipv4_update_pmtu(skb, net, info, 0, protocol);
else
ipv4_redirect(skb, net, 0, protocol);
xfrm_state_put(x);
return 0;
}
static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
struct net *net = dev_net(skb->dev);
int protocol = iph->nexthdr;
struct ip_comp_hdr *ipch;
struct ip_esp_hdr *esph;
struct ip_auth_hdr *ah;
struct xfrm_state *x;
struct xfrm_if *xi;
__be32 spi;
switch (protocol) {
case IPPROTO_ESP:
esph = (struct ip_esp_hdr *)(skb->data + offset);
spi = esph->spi;
break;
case IPPROTO_AH:
ah = (struct ip_auth_hdr *)(skb->data + offset);
spi = ah->spi;
break;
case IPPROTO_COMP:
ipch = (struct ip_comp_hdr *)(skb->data + offset);
spi = htonl(ntohs(ipch->cpi));
break;
default:
return 0;
}
if (type != ICMPV6_PKT_TOOBIG &&
type != NDISC_REDIRECT)
return 0;
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
spi, protocol, AF_INET6);
if (!x)
return 0;
xi = xfrmi_lookup(net, x);
if (!xi) {
xfrm_state_put(x);
return -1;
}
if (type == NDISC_REDIRECT)
ip6_redirect(skb, net, skb->dev->ifindex, 0,
sock_net_uid(net, NULL));
else
ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
xfrm_state_put(x);
return 0;
}
static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
{
if (xi->p.link != p->link)
return -EINVAL;
xi->p.if_id = p->if_id;
return 0;
}
static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
struct net *net = xi->net;
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
int err;
xfrmi_unlink(xfrmn, xi);
synchronize_net();
err = xfrmi_change(xi, p);
xfrmi_link(xfrmn, xi);
netdev_state_change(xi->dev);
return err;
}
static int xfrmi_get_iflink(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
return xi->p.link;
}
static const struct net_device_ops xfrmi_netdev_ops = {
.ndo_init = xfrmi_dev_init,
.ndo_uninit = xfrmi_dev_uninit,
.ndo_start_xmit = xfrmi_xmit,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = xfrmi_get_iflink,
};
static void xfrmi_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &xfrmi_netdev_ops;
dev->header_ops = &ip_tunnel_header_ops;
dev->type = ARPHRD_NONE;
dev->mtu = ETH_DATA_LEN;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP_MAX_MTU;
dev->flags = IFF_NOARP;
dev->needs_free_netdev = true;
dev->priv_destructor = xfrmi_dev_free;
netif_keep_dst(dev);
eth_broadcast_addr(dev->broadcast);
}
#define XFRMI_FEATURES (NETIF_F_SG | \
NETIF_F_FRAGLIST | \
NETIF_F_GSO_SOFTWARE | \
NETIF_F_HW_CSUM)
static int xfrmi_dev_init(struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
int err;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
err = gro_cells_init(&xi->gro_cells, dev);
if (err) {
free_percpu(dev->tstats);
return err;
}
dev->features |= NETIF_F_LLTX;
dev->features |= XFRMI_FEATURES;
dev->hw_features |= XFRMI_FEATURES;
if (phydev) {
dev->needed_headroom = phydev->needed_headroom;
dev->needed_tailroom = phydev->needed_tailroom;
if (is_zero_ether_addr(dev->dev_addr))
eth_hw_addr_inherit(dev, phydev);
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, phydev->broadcast,
dev->addr_len);
} else {
eth_hw_addr_random(dev);
eth_broadcast_addr(dev->broadcast);
}
return 0;
}
static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
return 0;
}
static void xfrmi_netlink_parms(struct nlattr *data[],
struct xfrm_if_parms *parms)
{
memset(parms, 0, sizeof(*parms));
if (!data)
return;
if (data[IFLA_XFRM_LINK])
parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);
if (data[IFLA_XFRM_IF_ID])
parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
if (data[IFLA_XFRM_COLLECT_METADATA])
parms->collect_md = true;
}
static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct net *net = dev_net(dev);
struct xfrm_if_parms p = {};
struct xfrm_if *xi;
int err;
xfrmi_netlink_parms(data, &p);
if (p.collect_md) {
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
if (p.link || p.if_id) {
NL_SET_ERR_MSG(extack, "link and if_id must be zero");
return -EINVAL;
}
if (rtnl_dereference(xfrmn->collect_md_xfrmi))
return -EEXIST;
} else {
if (!p.if_id) {
NL_SET_ERR_MSG(extack, "if_id must be non zero");
return -EINVAL;
}
xi = xfrmi_locate(net, &p);
if (xi)
return -EEXIST;
}
xi = netdev_priv(dev);
xi->p = p;
xi->net = net;
xi->dev = dev;
err = xfrmi_create(dev);
return err;
}
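/* For reference, the usual userspace trigger for this path is iproute2,
 * roughly: "ip link add xfrm0 type xfrm dev eth0 if_id 42" (the interface
 * names and the if_id value here are only examples).
 */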
static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
{
unregister_netdevice_queue(dev, head);
}
static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net *net = xi->net;
struct xfrm_if_parms p = {};
xfrmi_netlink_parms(data, &p);
if (!p.if_id) {
NL_SET_ERR_MSG(extack, "if_id must be non zero");
return -EINVAL;
}
if (p.collect_md) {
NL_SET_ERR_MSG(extack, "collect_md can't be changed");
return -EINVAL;
}
xi = xfrmi_locate(net, &p);
if (!xi) {
xi = netdev_priv(dev);
} else {
if (xi->dev != dev)
return -EEXIST;
if (xi->p.collect_md) {
NL_SET_ERR_MSG(extack,
"device can't be changed to collect_md");
return -EINVAL;
}
}
return xfrmi_update(xi, &p);
}
static size_t xfrmi_get_size(const struct net_device *dev)
{
return
/* IFLA_XFRM_LINK */
nla_total_size(4) +
/* IFLA_XFRM_IF_ID */
nla_total_size(4) +
/* IFLA_XFRM_COLLECT_METADATA */
nla_total_size(0) +
0;
}
static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
struct xfrm_if_parms *parm = &xi->p;
if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id) ||
(xi->p.collect_md && nla_put_flag(skb, IFLA_XFRM_COLLECT_METADATA)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static struct net *xfrmi_get_link_net(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
return xi->net;
}
static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
[IFLA_XFRM_UNSPEC] = { .strict_start_type = IFLA_XFRM_COLLECT_METADATA },
[IFLA_XFRM_LINK] = { .type = NLA_U32 },
[IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
[IFLA_XFRM_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
.kind = "xfrm",
.maxtype = IFLA_XFRM_MAX,
.policy = xfrmi_policy,
.priv_size = sizeof(struct xfrm_if),
.setup = xfrmi_dev_setup,
.validate = xfrmi_validate,
.newlink = xfrmi_newlink,
.dellink = xfrmi_dellink,
.changelink = xfrmi_changelink,
.get_size = xfrmi_get_size,
.fill_info = xfrmi_fill_info,
.get_link_net = xfrmi_get_link_net,
};
static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
{
struct net *net;
LIST_HEAD(list);
rtnl_lock();
list_for_each_entry(net, net_exit_list, exit_list) {
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
struct xfrm_if __rcu **xip;
struct xfrm_if *xi;
int i;
for (i = 0; i < XFRMI_HASH_SIZE; i++) {
for (xip = &xfrmn->xfrmi[i];
(xi = rtnl_dereference(*xip)) != NULL;
xip = &xi->next)
unregister_netdevice_queue(xi->dev, &list);
}
xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
if (xi)
unregister_netdevice_queue(xi->dev, &list);
}
unregister_netdevice_many(&list);
rtnl_unlock();
}
static struct pernet_operations xfrmi_net_ops = {
.exit_batch = xfrmi_exit_batch_net,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};
static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
.handler = xfrmi6_rcv,
.input_handler = xfrmi6_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 10,
};
static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
.handler = xfrm6_rcv,
.input_handler = xfrm_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 10,
};
static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
.handler = xfrm6_rcv,
.input_handler = xfrm_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 10,
};
#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
static int xfrmi6_rcv_tunnel(struct sk_buff *skb)
{
const xfrm_address_t *saddr;
__be32 spi;
saddr = (const xfrm_address_t *)&ipv6_hdr(skb)->saddr;
spi = xfrm6_tunnel_spi_lookup(dev_net(skb->dev), saddr);
return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
}
static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = {
.handler = xfrmi6_rcv_tunnel,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 2,
};
static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
.handler = xfrmi6_rcv_tunnel,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 2,
};
#endif
static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
.handler = xfrmi4_rcv,
.input_handler = xfrmi4_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 10,
};
static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
.handler = xfrm4_rcv,
.input_handler = xfrm_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 10,
};
static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
.handler = xfrm4_rcv,
.input_handler = xfrm_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 10,
};
#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
static int xfrmi4_rcv_tunnel(struct sk_buff *skb)
{
return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
}
static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = {
.handler = xfrmi4_rcv_tunnel,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 3,
};
static struct xfrm_tunnel xfrmi_ipip6_handler __read_mostly = {
.handler = xfrmi4_rcv_tunnel,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 2,
};
#endif
static int __init xfrmi4_init(void)
{
int err;
err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
if (err < 0)
goto xfrm_proto_esp_failed;
err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
if (err < 0)
goto xfrm_proto_ah_failed;
err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
if (err < 0)
goto xfrm_proto_comp_failed;
#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET);
if (err < 0)
goto xfrm_tunnel_ipip_failed;
err = xfrm4_tunnel_register(&xfrmi_ipip6_handler, AF_INET6);
if (err < 0)
goto xfrm_tunnel_ipip6_failed;
#endif
return 0;
#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
xfrm_tunnel_ipip6_failed:
xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
#endif
xfrm_proto_comp_failed:
xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
return err;
}
static void xfrmi4_fini(void)
{
#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
xfrm4_tunnel_deregister(&xfrmi_ipip6_handler, AF_INET6);
xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET);
#endif
xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
}
static int __init xfrmi6_init(void)
{
int err;
err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
if (err < 0)
goto xfrm_proto_esp_failed;
err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
if (err < 0)
goto xfrm_proto_ah_failed;
err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
if (err < 0)
goto xfrm_proto_comp_failed;
#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET6);
if (err < 0)
goto xfrm_tunnel_ipv6_failed;
err = xfrm6_tunnel_register(&xfrmi_ip6ip_handler, AF_INET);
if (err < 0)
goto xfrm_tunnel_ip6ip_failed;
#endif
return 0;
#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
xfrm_tunnel_ip6ip_failed:
xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6);
xfrm_tunnel_ipv6_failed:
xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
#endif
xfrm_proto_comp_failed:
xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
return err;
}
static void xfrmi6_fini(void)
{
#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
xfrm6_tunnel_deregister(&xfrmi_ip6ip_handler, AF_INET);
xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6);
#endif
xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
}
static const struct xfrm_if_cb xfrm_if_cb = {
.decode_session = xfrmi_decode_session,
};
static int __init xfrmi_init(void)
{
const char *msg;
int err;
pr_info("IPsec XFRM device driver\n");
msg = "tunnel device";
err = register_pernet_device(&xfrmi_net_ops);
if (err < 0)
goto pernet_dev_failed;
msg = "xfrm4 protocols";
err = xfrmi4_init();
if (err < 0)
goto xfrmi4_failed;
msg = "xfrm6 protocols";
err = xfrmi6_init();
if (err < 0)
goto xfrmi6_failed;
msg = "netlink interface";
err = rtnl_link_register(&xfrmi_link_ops);
if (err < 0)
goto rtnl_link_failed;
err = register_xfrm_interface_bpf();
if (err < 0)
goto kfunc_failed;
lwtunnel_encap_add_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
xfrm_if_register_cb(&xfrm_if_cb);
return err;
kfunc_failed:
rtnl_link_unregister(&xfrmi_link_ops);
rtnl_link_failed:
xfrmi6_fini();
xfrmi6_failed:
xfrmi4_fini();
xfrmi4_failed:
unregister_pernet_device(&xfrmi_net_ops);
pernet_dev_failed:
pr_err("xfrmi init: failed to register %s\n", msg);
return err;
}
static void __exit xfrmi_fini(void)
{
xfrm_if_unregister_cb();
lwtunnel_encap_del_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
rtnl_link_unregister(&xfrmi_link_ops);
xfrmi4_fini();
xfrmi6_fini();
unregister_pernet_device(&xfrmi_net_ops);
}
module_init(xfrmi_init);
module_exit(xfrmi_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("xfrm");
MODULE_ALIAS_NETDEV("xfrm0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("XFRM virtual interface");
| linux-master | net/xfrm/xfrm_interface_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xfrm_input.c
*
* Changes:
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>
#include "xfrm_inout.h"
struct xfrm_trans_tasklet {
struct work_struct work;
spinlock_t queue_lock;
struct sk_buff_head queue;
};
struct xfrm_trans_cb {
union {
struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header;
int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
struct net *net;
};
#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];
static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;
static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
int err = 0;
if (WARN_ON(afinfo->family > AF_INET6))
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_input_afinfo_lock);
if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]))
err = -EEXIST;
else
rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo);
spin_unlock_bh(&xfrm_input_afinfo_lock);
return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
int err = 0;
spin_lock_bh(&xfrm_input_afinfo_lock);
if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) {
if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo))
err = -EINVAL;
else
RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL);
}
spin_unlock_bh(&xfrm_input_afinfo_lock);
synchronize_rcu();
return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
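/*
 * Illustrative sketch only: how a protocol family module might register an
 * input afinfo with the table above.  Kept under "#if 0" because the struct
 * layout is assumed from the fields this file dereferences (family, is_ipip,
 * callback), not from its definition.
 */
#if 0
static int example_xfrm_input_cb(struct sk_buff *skb, u8 protocol, int err)
{
	/* per-family post-processing hook, see xfrm_rcv_cb() above */
	return 0;
}

static const struct xfrm_input_afinfo example_xfrm_input_afinfo = {
	.family		= AF_INET,
	.is_ipip	= false,
	.callback	= example_xfrm_input_cb,
};

/* xfrm_input_register_afinfo(&example_xfrm_input_afinfo) at init time,
 * xfrm_input_unregister_afinfo() on module exit.
 */
#endif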
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip)
{
const struct xfrm_input_afinfo *afinfo;
if (WARN_ON_ONCE(family > AF_INET6))
return NULL;
rcu_read_lock();
afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]);
if (unlikely(!afinfo))
rcu_read_unlock();
return afinfo;
}
static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
int err)
{
bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6);
const struct xfrm_input_afinfo *afinfo;
int ret;
afinfo = xfrm_input_get_afinfo(family, is_ipip);
if (!afinfo)
return -EAFNOSUPPORT;
ret = afinfo->callback(skb, protocol, err);
rcu_read_unlock();
return ret;
}
struct sec_path *secpath_set(struct sk_buff *skb)
{
struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
if (!sp)
return NULL;
if (tmp) /* reused existing one (was COW'd if needed) */
return sp;
/* allocated new secpath */
memset(sp->ovec, 0, sizeof(sp->ovec));
sp->olen = 0;
sp->len = 0;
sp->verified_cnt = 0;
return sp;
}
EXPORT_SYMBOL(secpath_set);
/* Fetch spi and seq from ipsec header */
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
int offset, offset_seq;
int hlen;
switch (nexthdr) {
case IPPROTO_AH:
hlen = sizeof(struct ip_auth_hdr);
offset = offsetof(struct ip_auth_hdr, spi);
offset_seq = offsetof(struct ip_auth_hdr, seq_no);
break;
case IPPROTO_ESP:
hlen = sizeof(struct ip_esp_hdr);
offset = offsetof(struct ip_esp_hdr, spi);
offset_seq = offsetof(struct ip_esp_hdr, seq_no);
break;
case IPPROTO_COMP:
if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
return -EINVAL;
*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
*seq = 0;
return 0;
default:
return 1;
}
if (!pskb_may_pull(skb, hlen))
return -EINVAL;
*spi = *(__be32 *)(skb_transport_header(skb) + offset);
*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);
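/*
 * Illustrative sketch only (not used by this file): a minimal example of how
 * a caller whose transport header already points at the IPsec header could
 * use xfrm_parse_spi() before doing a state lookup.
 */
static inline int example_fetch_spi_seq(struct sk_buff *skb, u8 nexthdr)
{
	__be32 spi, seq;
	int err;

	err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
	if (err)
		return err;	/* -EINVAL on truncation, 1 if no SPI exists */

	/* spi and seq are in network byte order here; the next step would
	 * typically be xfrm_state_lookup(net, mark, daddr, spi, nexthdr,
	 * family) as done in xfrm_input() below.
	 */
	return 0;
}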
static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
struct iphdr *iph;
int optlen = 0;
int err = -EINVAL;
skb->protocol = htons(ETH_P_IP);
if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
struct ip_beet_phdr *ph;
int phlen;
if (!pskb_may_pull(skb, sizeof(*ph)))
goto out;
ph = (struct ip_beet_phdr *)skb->data;
phlen = sizeof(*ph) + ph->padlen;
optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
if (optlen < 0 || optlen & 3 || optlen > 250)
goto out;
XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;
if (!pskb_may_pull(skb, phlen))
goto out;
__skb_pull(skb, phlen);
}
skb_push(skb, sizeof(*iph));
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
xfrm4_beet_make_header(skb);
iph = ip_hdr(skb);
iph->ihl += optlen / 4;
iph->tot_len = htons(skb->len);
iph->daddr = x->sel.daddr.a4;
iph->saddr = x->sel.saddr.a4;
iph->check = 0;
iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
err = 0;
out:
return err;
}
static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
struct iphdr *inner_iph = ipip_hdr(skb);
if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
IP_ECN_set_ce(inner_iph);
}
static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
int err = -EINVAL;
skb->protocol = htons(ETH_P_IP);
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
err = skb_unclone(skb, GFP_ATOMIC);
if (err)
goto out;
if (x->props.flags & XFRM_STATE_DECAP_DSCP)
ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
if (!(x->props.flags & XFRM_STATE_NOECN))
ipip_ecn_decapsulate(skb);
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
if (skb->mac_len)
eth_hdr(skb)->h_proto = skb->protocol;
err = 0;
out:
return err;
}
static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
IP6_ECN_set_ce(skb, inner_iph);
}
static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
int err = -EINVAL;
skb->protocol = htons(ETH_P_IPV6);
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto out;
err = skb_unclone(skb, GFP_ATOMIC);
if (err)
goto out;
if (x->props.flags & XFRM_STATE_DECAP_DSCP)
ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
if (!(x->props.flags & XFRM_STATE_NOECN))
ipip6_ecn_decapsulate(skb);
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
if (skb->mac_len)
eth_hdr(skb)->h_proto = skb->protocol;
err = 0;
out:
return err;
}
static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipv6hdr *ip6h;
int size = sizeof(struct ipv6hdr);
int err;
skb->protocol = htons(ETH_P_IPV6);
err = skb_cow_head(skb, size + skb->mac_len);
if (err)
goto out;
__skb_push(skb, size);
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
xfrm6_beet_make_header(skb);
ip6h = ipv6_hdr(skb);
ip6h->payload_len = htons(skb->len - size);
ip6h->daddr = x->sel.daddr.in6;
ip6h->saddr = x->sel.saddr.in6;
err = 0;
out:
return err;
}
/* Remove encapsulation header.
*
* The IP header will be moved over the top of the encapsulation
* header.
*
* On entry, the transport header shall point to where the IP header
* should be and the network header shall be set to where the IP
* header currently is. skb->data shall point to the start of the
* payload.
*/
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
struct sk_buff *skb)
{
switch (x->props.mode) {
case XFRM_MODE_BEET:
switch (x->sel.family) {
case AF_INET:
return xfrm4_remove_beet_encap(x, skb);
case AF_INET6:
return xfrm6_remove_beet_encap(x, skb);
}
break;
case XFRM_MODE_TUNNEL:
switch (XFRM_MODE_SKB_CB(skb)->protocol) {
case IPPROTO_IPIP:
return xfrm4_remove_tunnel_encap(x, skb);
case IPPROTO_IPV6:
return xfrm6_remove_tunnel_encap(x, skb);
}
return -EINVAL;
}
WARN_ON_ONCE(1);
return -EOPNOTSUPP;
}
static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
switch (x->props.family) {
case AF_INET:
xfrm4_extract_header(skb);
break;
case AF_INET6:
xfrm6_extract_header(skb);
break;
default:
WARN_ON_ONCE(1);
return -EAFNOSUPPORT;
}
return xfrm_inner_mode_encap_remove(x, skb);
}
/* Remove encapsulation header.
*
* The IP header will be moved over the top of the encapsulation header.
*
* On entry, skb_transport_header() shall point to where the IP header
* should be and skb_network_header() shall be set to where the IP header
* currently is. skb->data shall point to the start of the payload.
*/
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
skb_network_header(skb), ihl);
skb->network_header = skb->transport_header;
}
ip_hdr(skb)->tot_len = htons(skb->len + ihl);
skb_reset_transport_header(skb);
return 0;
}
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
int ihl = skb->data - skb_transport_header(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
skb_network_header(skb), ihl);
skb->network_header = skb->transport_header;
}
ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
sizeof(struct ipv6hdr));
skb_reset_transport_header(skb);
return 0;
#else
WARN_ON_ONCE(1);
return -EAFNOSUPPORT;
#endif
}
static int xfrm_inner_mode_input(struct xfrm_state *x,
struct sk_buff *skb)
{
switch (x->props.mode) {
case XFRM_MODE_BEET:
case XFRM_MODE_TUNNEL:
return xfrm_prepare_input(x, skb);
case XFRM_MODE_TRANSPORT:
if (x->props.family == AF_INET)
return xfrm4_transport_input(x, skb);
if (x->props.family == AF_INET6)
return xfrm6_transport_input(x, skb);
break;
case XFRM_MODE_ROUTEOPTIMIZATION:
WARN_ON_ONCE(1);
break;
default:
WARN_ON_ONCE(1);
break;
}
return -EOPNOTSUPP;
}
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
const struct xfrm_state_afinfo *afinfo;
struct net *net = dev_net(skb->dev);
int err;
__be32 seq;
__be32 seq_hi;
struct xfrm_state *x = NULL;
xfrm_address_t *daddr;
u32 mark = skb->mark;
unsigned int family = AF_UNSPEC;
int decaps = 0;
int async = 0;
bool xfrm_gro = false;
bool crypto_done = false;
struct xfrm_offload *xo = xfrm_offload(skb);
struct sec_path *sp;
if (encap_type < 0) {
x = xfrm_input_state(skb);
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
if (x->km.state == XFRM_STATE_ACQ)
XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
else
XFRM_INC_STATS(net,
LINUX_MIB_XFRMINSTATEINVALID);
if (encap_type == -1)
dev_put(skb->dev);
goto drop;
}
family = x->props.family;
/* An encap_type of -1 indicates async resumption. */
if (encap_type == -1) {
async = 1;
seq = XFRM_SKB_CB(skb)->seq.input.low;
goto resume;
}
/* encap_type < -1 indicates a GRO call. */
encap_type = 0;
seq = XFRM_SPI_SKB_CB(skb)->seq;
if (xo && (xo->flags & CRYPTO_DONE)) {
crypto_done = true;
family = XFRM_SPI_SKB_CB(skb)->family;
if (!(xo->status & CRYPTO_SUCCESS)) {
if (xo->status &
(CRYPTO_TRANSPORT_AH_AUTH_FAILED |
CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
CRYPTO_TUNNEL_AH_AUTH_FAILED |
CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {
xfrm_audit_state_icvfail(x, skb,
x->type->proto);
x->stats.integrity_failed++;
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
goto drop;
}
if (xo->status & CRYPTO_INVALID_PROTOCOL) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
goto drop;
}
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto drop;
}
if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
}
goto lock;
}
family = XFRM_SPI_SKB_CB(skb)->family;
	/* if a tunnel is present, override skb->mark with the tunnel i_key */
switch (family) {
case AF_INET:
if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
break;
case AF_INET6:
if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
break;
}
sp = secpath_set(skb);
if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
goto drop;
}
seq = 0;
if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
daddr = (xfrm_address_t *)(skb_network_header(skb) +
XFRM_SPI_SKB_CB(skb)->daddroff);
do {
sp = skb_sec_path(skb);
if (sp->len == XFRM_MAX_DEPTH) {
secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto drop;
}
x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
if (x == NULL) {
secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
xfrm_audit_state_notfound(skb, family, spi, seq);
goto drop;
}
skb->mark = xfrm_smark_get(skb->mark, x);
sp->xvec[sp->len++] = x;
skb_dst_force(skb);
if (!skb_dst(skb)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
goto drop;
}
lock:
spin_lock(&x->lock);
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
if (x->km.state == XFRM_STATE_ACQ)
XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
else
XFRM_INC_STATS(net,
LINUX_MIB_XFRMINSTATEINVALID);
goto drop_unlock;
}
if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
goto drop_unlock;
}
if (xfrm_replay_check(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
if (xfrm_state_check_expire(x)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
goto drop_unlock;
}
spin_unlock(&x->lock);
if (xfrm_tunnel_check(skb, x, family)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
goto drop;
}
seq_hi = htonl(xfrm_replay_seqhi(x, seq));
XFRM_SKB_CB(skb)->seq.input.low = seq;
XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
dev_hold(skb->dev);
if (crypto_done)
nexthdr = x->type_offload->input_tail(x, skb);
else
nexthdr = x->type->input(x, skb);
if (nexthdr == -EINPROGRESS)
return 0;
resume:
dev_put(skb->dev);
spin_lock(&x->lock);
if (nexthdr < 0) {
if (nexthdr == -EBADMSG) {
xfrm_audit_state_icvfail(x, skb,
x->type->proto);
x->stats.integrity_failed++;
}
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
goto drop_unlock;
}
/* only the first xfrm gets the encap type */
encap_type = 0;
if (xfrm_replay_recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
xfrm_replay_advance(x, seq);
x->curlft.bytes += skb->len;
x->curlft.packets++;
x->lastused = ktime_get_real_seconds();
spin_unlock(&x->lock);
XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;
if (xfrm_inner_mode_input(x, skb)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
goto drop;
}
if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
decaps = 1;
break;
}
/*
* We need the inner address. However, we only get here for
* transport mode so the outer address is identical.
*/
daddr = &x->id.daddr;
family = x->props.family;
err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
if (err < 0) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
crypto_done = false;
} while (!err);
err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
if (err)
goto drop;
nf_reset_ct(skb);
if (decaps) {
sp = skb_sec_path(skb);
if (sp)
sp->olen = 0;
if (skb_valid_dst(skb))
skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return 0;
} else {
xo = xfrm_offload(skb);
if (xo)
xfrm_gro = xo->flags & XFRM_GRO;
err = -EAFNOSUPPORT;
rcu_read_lock();
afinfo = xfrm_state_afinfo_get_rcu(x->props.family);
if (likely(afinfo))
err = afinfo->transport_finish(skb, xfrm_gro || async);
rcu_read_unlock();
if (xfrm_gro) {
sp = skb_sec_path(skb);
if (sp)
sp->olen = 0;
if (skb_valid_dst(skb))
skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return err;
}
return err;
}
drop_unlock:
spin_unlock(&x->lock);
drop:
xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL(xfrm_input);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);
static void xfrm_trans_reinject(struct work_struct *work)
{
struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
struct sk_buff_head queue;
struct sk_buff *skb;
__skb_queue_head_init(&queue);
spin_lock_bh(&trans->queue_lock);
skb_queue_splice_init(&trans->queue, &queue);
spin_unlock_bh(&trans->queue_lock);
local_bh_disable();
while ((skb = __skb_dequeue(&queue)))
XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
NULL, skb);
local_bh_enable();
}
int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *))
{
struct xfrm_trans_tasklet *trans;
trans = this_cpu_ptr(&xfrm_trans_tasklet);
if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
return -ENOBUFS;
BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
XFRM_TRANS_SKB_CB(skb)->finish = finish;
XFRM_TRANS_SKB_CB(skb)->net = net;
spin_lock_bh(&trans->queue_lock);
__skb_queue_tail(&trans->queue, skb);
spin_unlock_bh(&trans->queue_lock);
schedule_work(&trans->work);
return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);
int xfrm_trans_queue(struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *))
{
return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);
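/*
 * Illustrative sketch only (not used by this file): deferring an skb to the
 * per-cpu reinjection queue above.  example_trans_finish is a hypothetical
 * resume callback; a real one would continue the receive path instead of
 * dropping the packet.
 */
static int __maybe_unused example_trans_finish(struct net *net, struct sock *sk,
					       struct sk_buff *skb)
{
	kfree_skb(skb);		/* placeholder for the real continuation */
	return 0;
}

static inline int example_trans_defer(struct net *net, struct sk_buff *skb)
{
	return xfrm_trans_queue_net(net, skb, example_trans_finish);
}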
void __init xfrm_input_init(void)
{
int err;
int i;
init_dummy_netdev(&xfrm_napi_dev);
err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
if (err)
gro_cells.cells = NULL;
for_each_possible_cpu(i) {
struct xfrm_trans_tasklet *trans;
trans = &per_cpu(xfrm_trans_tasklet, i);
spin_lock_init(&trans->queue_lock);
__skb_queue_head_init(&trans->queue);
INIT_WORK(&trans->work, xfrm_trans_reinject);
}
}
| linux-master | net/xfrm/xfrm_input.c |
// SPDX-License-Identifier: GPL-2.0-only
/* xfrm_user.c: User interface to configure xfrm engine.
*
* Copyright (C) 2002 David S. Miller ([email protected])
*
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <[email protected]>
* IPv6 support
*
*/
#include <linux/compat.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <net/ah.h>
#include <linux/uaccess.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif
#include <asm/unaligned.h>
static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type,
struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[type];
struct xfrm_algo *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < (int)xfrm_alg_len(algp)) {
NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length");
return -EINVAL;
}
switch (type) {
case XFRMA_ALG_AUTH:
case XFRMA_ALG_CRYPT:
case XFRMA_ALG_COMP:
break;
default:
NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type");
return -EINVAL;
}
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
return 0;
}
static int verify_auth_trunc(struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
struct xfrm_algo_auth *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) {
NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length");
return -EINVAL;
}
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
return 0;
}
static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
struct xfrm_algo_aead *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < (int)aead_len(algp)) {
NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length");
return -EINVAL;
}
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
return 0;
}
static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
xfrm_address_t **addrp)
{
struct nlattr *rt = attrs[type];
if (rt && addrp)
*addrp = nla_data(rt);
}
static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_user_sec_ctx *uctx;
if (!rt)
return 0;
uctx = nla_data(rt);
if (uctx->len > nla_len(rt) ||
uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) {
NL_SET_ERR_MSG(extack, "Invalid security context length");
return -EINVAL;
}
return 0;
}
static inline int verify_replay(struct xfrm_usersa_info *p,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
struct xfrm_replay_state_esn *rs;
if (!rt) {
if (p->flags & XFRM_STATE_ESN) {
NL_SET_ERR_MSG(extack, "Missing required attribute for ESN");
return -EINVAL;
}
return 0;
}
rs = nla_data(rt);
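	/* bmp[] entries are 32-bit words, so the check below caps bmp_len at
	 * XFRMA_REPLAY_ESN_MAX / 32 words of bitmap (128 words when the
	 * maximum is 4096 bits, which is what the error message reflects).
	 */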
if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) {
NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128");
return -EINVAL;
}
if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
nla_len(rt) != sizeof(*rs)) {
NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length");
return -EINVAL;
}
	/* Only ESP and AH support the ESN feature. */
if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) {
NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH");
return -EINVAL;
}
if (p->replay_window != 0) {
NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window");
return -EINVAL;
}
return 0;
}
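/* Attribute rules enforced by verify_newsa_info() below, in short:
 *   AH:   AUTH or AUTH_TRUNC required; AEAD/CRYPT/COMP/TFCPAD rejected.
 *   ESP:  one of AUTH/AUTH_TRUNC/CRYPT/AEAD required; AEAD excludes the
 *         others; COMP rejected; TFCPAD only valid in tunnel mode.
 *   COMP: COMP required; all other algorithms and TFCPAD rejected;
 *         SPI must be below 0x10000.
 *   DSTOPTS/ROUTING (IPv6 only): COADDR required; algorithms, ENCAP,
 *         SEC_CTX and TFCPAD rejected.
 */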
static int verify_newsa_info(struct xfrm_usersa_info *p,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
int err;
err = -EINVAL;
switch (p->family) {
case AF_INET:
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
break;
#else
err = -EAFNOSUPPORT;
NL_SET_ERR_MSG(extack, "IPv6 support disabled");
goto out;
#endif
default:
NL_SET_ERR_MSG(extack, "Invalid address family");
goto out;
}
switch (p->sel.family) {
case AF_UNSPEC:
break;
case AF_INET:
if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
goto out;
}
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
goto out;
}
break;
#else
NL_SET_ERR_MSG(extack, "IPv6 support disabled");
err = -EAFNOSUPPORT;
goto out;
#endif
default:
NL_SET_ERR_MSG(extack, "Invalid address family in selector");
goto out;
}
err = -EINVAL;
switch (p->id.proto) {
case IPPROTO_AH:
if (!attrs[XFRMA_ALG_AUTH] &&
!attrs[XFRMA_ALG_AUTH_TRUNC]) {
NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH");
goto out;
}
if (attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_TFCPAD]) {
NL_SET_ERR_MSG(extack, "Invalid attributes for AH: AEAD, CRYPT, COMP, TFCPAD");
goto out;
}
break;
case IPPROTO_ESP:
if (attrs[XFRMA_ALG_COMP]) {
NL_SET_ERR_MSG(extack, "Invalid attribute for ESP: COMP");
goto out;
}
if (!attrs[XFRMA_ALG_AUTH] &&
!attrs[XFRMA_ALG_AUTH_TRUNC] &&
!attrs[XFRMA_ALG_CRYPT] &&
!attrs[XFRMA_ALG_AEAD]) {
NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD");
goto out;
}
if ((attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT]) &&
attrs[XFRMA_ALG_AEAD]) {
NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT");
goto out;
}
if (attrs[XFRMA_TFCPAD] &&
p->mode != XFRM_MODE_TUNNEL) {
NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode");
goto out;
}
break;
case IPPROTO_COMP:
if (!attrs[XFRMA_ALG_COMP]) {
NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP");
goto out;
}
if (attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_TFCPAD]) {
NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, AUTH, AUTH_TRUNC, CRYPT, TFCPAD");
goto out;
}
if (ntohl(p->id.spi) >= 0x10000) {
NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)");
goto out;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
if (attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ENCAP] ||
attrs[XFRMA_SEC_CTX] ||
attrs[XFRMA_TFCPAD]) {
NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING");
goto out;
}
if (!attrs[XFRMA_COADDR]) {
NL_SET_ERR_MSG(extack, "Missing required COADDR attribute for DSTOPTS/ROUTING");
goto out;
}
break;
#endif
default:
NL_SET_ERR_MSG(extack, "Unsupported protocol");
goto out;
}
if ((err = verify_aead(attrs, extack)))
goto out;
if ((err = verify_auth_trunc(attrs, extack)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack)))
goto out;
if ((err = verify_sec_ctx_len(attrs, extack)))
goto out;
if ((err = verify_replay(p, attrs, extack)))
goto out;
err = -EINVAL;
switch (p->mode) {
case XFRM_MODE_TRANSPORT:
case XFRM_MODE_TUNNEL:
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_BEET:
break;
default:
NL_SET_ERR_MSG(extack, "Unsupported mode");
goto out;
}
err = 0;
if (attrs[XFRMA_MTIMER_THRESH]) {
if (!attrs[XFRMA_ENCAP]) {
NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states");
err = -EINVAL;
goto out;
}
}
out:
return err;
}
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
struct xfrm_algo_desc *(*get_byname)(const char *, int),
struct nlattr *rta, struct netlink_ext_ack *extack)
{
struct xfrm_algo *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = get_byname(ualg->alg_name, 1);
if (!algo) {
NL_SET_ERR_MSG(extack, "Requested COMP algorithm not found");
return -ENOSYS;
}
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
*algpp = p;
return 0;
}
static int attach_crypt(struct xfrm_state *x, struct nlattr *rta,
struct netlink_ext_ack *extack)
{
struct xfrm_algo *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
if (!algo) {
NL_SET_ERR_MSG(extack, "Requested CRYPT algorithm not found");
return -ENOSYS;
}
x->props.ealgo = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
x->ealg = p;
x->geniv = algo->uinfo.encr.geniv;
return 0;
}
static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
struct nlattr *rta, struct netlink_ext_ack *extack)
{
struct xfrm_algo *ualg;
struct xfrm_algo_auth *p;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo) {
NL_SET_ERR_MSG(extack, "Requested AUTH algorithm not found");
return -ENOSYS;
}
*props = algo->desc.sadb_alg_id;
p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
p->alg_key_len = ualg->alg_key_len;
p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
*algpp = p;
return 0;
}
static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
struct nlattr *rta, struct netlink_ext_ack *extack)
{
struct xfrm_algo_auth *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo) {
NL_SET_ERR_MSG(extack, "Requested AUTH_TRUNC algorithm not found");
return -ENOSYS;
}
if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) {
NL_SET_ERR_MSG(extack, "Invalid length requested for truncated ICV");
return -EINVAL;
}
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
if (!p->alg_trunc_len)
p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
*algpp = p;
return 0;
}
static int attach_aead(struct xfrm_state *x, struct nlattr *rta,
struct netlink_ext_ack *extack)
{
struct xfrm_algo_aead *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
if (!algo) {
NL_SET_ERR_MSG(extack, "Requested AEAD algorithm not found");
return -ENOSYS;
}
x->props.ealgo = algo->desc.sadb_alg_id;
p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
x->aead = p;
x->geniv = algo->uinfo.aead.geniv;
return 0;
}
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
struct nlattr *rp,
struct netlink_ext_ack *extack)
{
struct xfrm_replay_state_esn *up;
unsigned int ulen;
if (!replay_esn || !rp)
return 0;
up = nla_data(rp);
ulen = xfrm_replay_state_esn_len(up);
/* Check the overall length and the internal bitmap length to avoid
* potential overflow. */
if (nla_len(rp) < (int)ulen) {
NL_SET_ERR_MSG(extack, "ESN attribute is too short");
return -EINVAL;
}
if (xfrm_replay_state_esn_len(replay_esn) != ulen) {
NL_SET_ERR_MSG(extack, "New ESN size doesn't match the existing SA's ESN size");
return -EINVAL;
}
if (replay_esn->bmp_len != up->bmp_len) {
NL_SET_ERR_MSG(extack, "New ESN bitmap size doesn't match the existing SA's ESN bitmap");
return -EINVAL;
}
if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) {
NL_SET_ERR_MSG(extack, "ESN replay window is longer than the bitmap");
return -EINVAL;
}
return 0;
}
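/* Allocate the ESN replay state and its "preplay" shadow copy based on
 * the XFRMA_REPLAY_ESN_VAL attribute.  Only as many bytes as the
 * attribute actually carries are copied; the remainder of the
 * (possibly larger) kernel structures stays zeroed.
 */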
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
struct xfrm_replay_state_esn **preplay_esn,
struct nlattr *rta)
{
struct xfrm_replay_state_esn *p, *pp, *up;
unsigned int klen, ulen;
if (!rta)
return 0;
up = nla_data(rta);
klen = xfrm_replay_state_esn_len(up);
ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up);
p = kzalloc(klen, GFP_KERNEL);
if (!p)
return -ENOMEM;
pp = kzalloc(klen, GFP_KERNEL);
if (!pp) {
kfree(p);
return -ENOMEM;
}
memcpy(p, up, ulen);
memcpy(pp, up, ulen);
*replay_esn = p;
*preplay_esn = pp;
return 0;
}
static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
unsigned int len = 0;
if (xfrm_ctx) {
len += sizeof(struct xfrm_user_sec_ctx);
len += xfrm_ctx->ctx_len;
}
return len;
}
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
memcpy(&x->id, &p->id, sizeof(x->id));
memcpy(&x->sel, &p->sel, sizeof(x->sel));
memcpy(&x->lft, &p->lft, sizeof(x->lft));
x->props.mode = p->mode;
x->props.replay_window = min_t(unsigned int, p->replay_window,
sizeof(x->replay.bitmap) * 8);
x->props.reqid = p->reqid;
x->props.family = p->family;
memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
x->props.flags = p->flags;
if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
x->sel.family = p->family;
}
/*
 * Someday, when pfkey also has support for this, the code could be made
 * shareable and moved to xfrm_state.c. - JHS
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
int update_esn)
{
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
if (re && x->replay_esn && x->preplay_esn) {
struct xfrm_replay_state_esn *replay_esn;
replay_esn = nla_data(re);
memcpy(x->replay_esn, replay_esn,
xfrm_replay_state_esn_len(replay_esn));
memcpy(x->preplay_esn, replay_esn,
xfrm_replay_state_esn_len(replay_esn));
}
if (rp) {
struct xfrm_replay_state *replay;
replay = nla_data(rp);
memcpy(&x->replay, replay, sizeof(*replay));
memcpy(&x->preplay, replay, sizeof(*replay));
}
if (lt) {
struct xfrm_lifetime_cur *ltime;
ltime = nla_data(lt);
x->curlft.bytes = ltime->bytes;
x->curlft.packets = ltime->packets;
x->curlft.add_time = ltime->add_time;
x->curlft.use_time = ltime->use_time;
}
if (et)
x->replay_maxage = nla_get_u32(et);
if (rt)
x->replay_maxdiff = nla_get_u32(rt);
if (mt)
x->mapping_maxage = nla_get_u32(mt);
}
static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
{
if (attrs[XFRMA_SET_MARK]) {
m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
if (attrs[XFRMA_SET_MARK_MASK])
m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]);
else
m->m = 0xffffffff;
} else {
m->v = m->m = 0;
}
}
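/* Build a new xfrm_state from an XFRM_MSG_NEWSA/XFRM_MSG_UPDSA request:
 * copy the base parameters, attach the algorithms and optional
 * attributes, initialize replay handling and, if requested, hardware
 * offload.  On failure the half-constructed state is marked dead and
 * released.
 */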
static struct xfrm_state *xfrm_state_construct(struct net *net,
struct xfrm_usersa_info *p,
struct nlattr **attrs,
int *errp,
struct netlink_ext_ack *extack)
{
struct xfrm_state *x = xfrm_state_alloc(net);
int err = -ENOMEM;
if (!x)
goto error_no_put;
copy_from_user_state(x, p);
if (attrs[XFRMA_ENCAP]) {
x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
sizeof(*x->encap), GFP_KERNEL);
if (x->encap == NULL)
goto error;
}
if (attrs[XFRMA_COADDR]) {
x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
sizeof(*x->coaddr), GFP_KERNEL);
if (x->coaddr == NULL)
goto error;
}
if (attrs[XFRMA_SA_EXTRA_FLAGS])
x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD], extack)))
goto error;
if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
attrs[XFRMA_ALG_AUTH_TRUNC], extack)))
goto error;
if (!x->props.aalgo) {
if ((err = attach_auth(&x->aalg, &x->props.aalgo,
attrs[XFRMA_ALG_AUTH], extack)))
goto error;
}
if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT], extack)))
goto error;
if ((err = attach_one_algo(&x->calg, &x->props.calgo,
xfrm_calg_get_byname,
attrs[XFRMA_ALG_COMP], extack)))
goto error;
if (attrs[XFRMA_TFCPAD])
x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
xfrm_mark_get(attrs, &x->mark);
xfrm_smark_init(attrs, &x->props.smark);
if (attrs[XFRMA_IF_ID])
x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack);
if (err)
goto error;
if (attrs[XFRMA_SEC_CTX]) {
err = security_xfrm_state_alloc(x,
nla_data(attrs[XFRMA_SEC_CTX]));
if (err)
goto error;
}
if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
attrs[XFRMA_REPLAY_ESN_VAL])))
goto error;
x->km.seq = p->seq;
x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
/* sysctl_xfrm_aevent_etime is in 100ms units */
x->replay_maxage = (net->xfrm.sysctl_aevent_etime * HZ) / XFRM_AE_ETH_M;
if ((err = xfrm_init_replay(x, extack)))
goto error;
/* override default values from above */
xfrm_update_ae_params(x, attrs, 0);
/* configure the hardware if offload is requested */
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_state_add(net, x,
nla_data(attrs[XFRMA_OFFLOAD_DEV]),
extack);
if (err)
goto error;
}
return x;
error:
x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
error_no_put:
*errp = err;
return NULL;
}
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_usersa_info *p = nlmsg_data(nlh);
struct xfrm_state *x;
int err;
struct km_event c;
err = verify_newsa_info(p, attrs, extack);
if (err)
return err;
x = xfrm_state_construct(net, p, attrs, &err, extack);
if (!x)
return err;
xfrm_state_hold(x);
if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
err = xfrm_state_add(x);
else
err = xfrm_state_update(x);
xfrm_audit_state_add(x, err ? 0 : 1, true);
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
xfrm_dev_state_delete(x);
__xfrm_state_put(x);
goto out;
}
if (x->km.state == XFRM_STATE_VOID)
x->km.state = XFRM_STATE_VALID;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.event = nlh->nlmsg_type;
km_state_notify(x, &c);
out:
xfrm_state_put(x);
return err;
}
static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
struct xfrm_usersa_id *p,
struct nlattr **attrs,
int *errp)
{
struct xfrm_state *x = NULL;
struct xfrm_mark m;
int err;
u32 mark = xfrm_mark_get(attrs, &m);
if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
err = -ESRCH;
x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
} else {
xfrm_address_t *saddr = NULL;
verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
if (!saddr) {
err = -EINVAL;
goto out;
}
err = -ESRCH;
x = xfrm_state_lookup_byaddr(net, mark,
&p->daddr, saddr,
p->proto, p->family);
}
out:
if (!x && errp)
*errp = err;
return x;
}
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
int err = -ESRCH;
struct km_event c;
struct xfrm_usersa_id *p = nlmsg_data(nlh);
x = xfrm_user_state_lookup(net, p, attrs, &err);
if (x == NULL)
return err;
if ((err = security_xfrm_state_delete(x)) != 0)
goto out;
if (xfrm_state_kern(x)) {
NL_SET_ERR_MSG(extack, "SA is in use by tunnels");
err = -EPERM;
goto out;
}
err = xfrm_state_delete(x);
if (err < 0)
goto out;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.event = nlh->nlmsg_type;
km_state_notify(x, &c);
out:
xfrm_audit_state_delete(x, err ? 0 : 1, true);
xfrm_state_put(x);
return err;
}
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
memset(p, 0, sizeof(*p));
memcpy(&p->id, &x->id, sizeof(p->id));
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
if (x->xso.dev)
xfrm_dev_state_update_curlft(x);
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay);
put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
p->mode = x->props.mode;
p->replay_window = x->props.replay_window;
p->reqid = x->props.reqid;
p->family = x->props.family;
p->flags = x->props.flags;
p->seq = x->km.seq;
}
struct xfrm_dump_info {
struct sk_buff *in_skb;
struct sk_buff *out_skb;
u32 nlmsg_seq;
u16 nlmsg_flags;
};
static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
struct xfrm_user_sec_ctx *uctx;
struct nlattr *attr;
int ctx_size = sizeof(*uctx) + s->ctx_len;
attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
if (attr == NULL)
return -EMSGSIZE;
uctx = nla_data(attr);
uctx->exttype = XFRMA_SEC_CTX;
uctx->len = ctx_size;
uctx->ctx_doi = s->ctx_doi;
uctx->ctx_alg = s->ctx_alg;
uctx->ctx_len = s->ctx_len;
memcpy(uctx + 1, s->ctx_str, s->ctx_len);
return 0;
}
static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb)
{
struct xfrm_user_offload *xuo;
struct nlattr *attr;
attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
if (attr == NULL)
return -EMSGSIZE;
xuo = nla_data(attr);
memset(xuo, 0, sizeof(*xuo));
xuo->ifindex = xso->dev->ifindex;
if (xso->dir == XFRM_DEV_OFFLOAD_IN)
xuo->flags = XFRM_OFFLOAD_INBOUND;
if (xso->type == XFRM_DEV_OFFLOAD_PACKET)
xuo->flags |= XFRM_OFFLOAD_PACKET;
return 0;
}
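/* Secret key material is redacted from dumps when the kernel is locked
 * down (LOCKDOWN_XFRM_SECRET).
 */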
static bool xfrm_redact(void)
{
return IS_ENABLED(CONFIG_SECURITY) &&
security_locked_down(LOCKDOWN_XFRM_SECRET);
}
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
struct xfrm_algo *algo;
struct xfrm_algo_auth *ap;
struct nlattr *nla;
bool redact_secret = xfrm_redact();
nla = nla_reserve(skb, XFRMA_ALG_AUTH,
sizeof(*algo) + (auth->alg_key_len + 7) / 8);
if (!nla)
return -EMSGSIZE;
algo = nla_data(nla);
strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
if (redact_secret && auth->alg_key_len)
memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
else
memcpy(algo->alg_key, auth->alg_key,
(auth->alg_key_len + 7) / 8);
algo->alg_key_len = auth->alg_key_len;
nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth));
if (!nla)
return -EMSGSIZE;
ap = nla_data(nla);
memcpy(ap, auth, sizeof(struct xfrm_algo_auth));
if (redact_secret && auth->alg_key_len)
memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
else
memcpy(ap->alg_key, auth->alg_key,
(auth->alg_key_len + 7) / 8);
return 0;
}
static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
{
struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead));
struct xfrm_algo_aead *ap;
bool redact_secret = xfrm_redact();
if (!nla)
return -EMSGSIZE;
ap = nla_data(nla);
strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name));
ap->alg_key_len = aead->alg_key_len;
ap->alg_icv_len = aead->alg_icv_len;
if (redact_secret && aead->alg_key_len)
memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
else
memcpy(ap->alg_key, aead->alg_key,
(aead->alg_key_len + 7) / 8);
return 0;
}
static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
{
struct xfrm_algo *ap;
bool redact_secret = xfrm_redact();
struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT,
xfrm_alg_len(ealg));
if (!nla)
return -EMSGSIZE;
ap = nla_data(nla);
strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name));
ap->alg_key_len = ealg->alg_key_len;
if (redact_secret && ealg->alg_key_len)
memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
else
memcpy(ap->alg_key, ealg->alg_key,
(ealg->alg_key_len + 7) / 8);
return 0;
}
static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
{
struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
struct xfrm_algo *ap;
if (!nla)
return -EMSGSIZE;
ap = nla_data(nla);
strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name));
ap->alg_key_len = 0;
return 0;
}
static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
{
struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
struct xfrm_encap_tmpl *uep;
if (!nla)
return -EMSGSIZE;
uep = nla_data(nla);
memset(uep, 0, sizeof(*uep));
uep->encap_type = ep->encap_type;
uep->encap_sport = ep->encap_sport;
uep->encap_dport = ep->encap_dport;
uep->encap_oa = ep->encap_oa;
return 0;
}
static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
{
int ret = 0;
if (m->v | m->m) {
ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
if (!ret)
ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
}
return ret;
}
/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
struct xfrm_usersa_info *p,
struct sk_buff *skb)
{
int ret = 0;
copy_to_user_state(x, p);
if (x->props.extra_flags) {
ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
x->props.extra_flags);
if (ret)
goto out;
}
if (x->coaddr) {
ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
if (ret)
goto out;
}
if (x->lastused) {
ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
XFRMA_PAD);
if (ret)
goto out;
}
if (x->aead) {
ret = copy_to_user_aead(x->aead, skb);
if (ret)
goto out;
}
if (x->aalg) {
ret = copy_to_user_auth(x->aalg, skb);
if (ret)
goto out;
}
if (x->ealg) {
ret = copy_to_user_ealg(x->ealg, skb);
if (ret)
goto out;
}
if (x->calg) {
ret = copy_to_user_calg(x->calg, skb);
if (ret)
goto out;
}
if (x->encap) {
ret = copy_to_user_encap(x->encap, skb);
if (ret)
goto out;
}
if (x->tfcpad) {
ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
if (ret)
goto out;
}
ret = xfrm_mark_put(skb, &x->mark);
if (ret)
goto out;
ret = xfrm_smark_put(skb, &x->props.smark);
if (ret)
goto out;
if (x->replay_esn)
ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
xfrm_replay_state_esn_len(x->replay_esn),
x->replay_esn);
else
ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
&x->replay);
if (ret)
goto out;
if (x->xso.dev)
ret = copy_user_offload(&x->xso, skb);
if (ret)
goto out;
if (x->if_id) {
ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
if (ret)
goto out;
}
if (x->security) {
ret = copy_sec_ctx(x->security, skb);
if (ret)
goto out;
}
if (x->mapping_maxage)
ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
out:
return ret;
}
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
struct xfrm_dump_info *sp = ptr;
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
struct xfrm_translator *xtr;
struct xfrm_usersa_info *p;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
if (nlh == NULL)
return -EMSGSIZE;
p = nlmsg_data(nlh);
err = copy_to_user_state_extra(x, p, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
xtr = xfrm_get_translator();
if (xtr) {
err = xtr->alloc_compat(skb, nlh);
xfrm_put_translator(xtr);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
}
return 0;
}
static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
struct sock *sk = cb->skb->sk;
struct net *net = sock_net(sk);
if (cb->args[0])
xfrm_state_walk_done(walk, net);
return 0;
}
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
struct xfrm_dump_info info;
BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
sizeof(cb->args) - sizeof(cb->args[0]));
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
if (!cb->args[0]) {
struct nlattr *attrs[XFRMA_MAX+1];
struct xfrm_address_filter *filter = NULL;
u8 proto = 0;
int err;
err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
xfrma_policy, cb->extack);
if (err < 0)
return err;
if (attrs[XFRMA_ADDRESS_FILTER]) {
filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
sizeof(*filter), GFP_KERNEL);
if (filter == NULL)
return -ENOMEM;
/* see addr_match(): (prefix length >> 5) << 2 is used to index
 * into the xfrm_address_t, so the prefix length must not exceed
 * the address size in bits
 */
if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
filter->dplen > (sizeof(xfrm_address_t) << 3)) {
kfree(filter);
return -EINVAL;
}
}
if (attrs[XFRMA_PROTO])
proto = nla_get_u8(attrs[XFRMA_PROTO]);
xfrm_state_walk_init(walk, proto, filter);
cb->args[0] = 1;
}
(void) xfrm_state_walk(net, walk, dump_one_state, &info);
return skb->len;
}
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
struct xfrm_state *x, u32 seq)
{
struct xfrm_dump_info info;
struct sk_buff *skb;
int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return ERR_PTR(-ENOMEM);
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
err = dump_one_state(x, 0, &info);
if (err) {
kfree_skb(skb);
return ERR_PTR(err);
}
return skb;
}
/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
* Must be called with RCU read lock.
*/
static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
u32 pid, unsigned int group)
{
struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
struct xfrm_translator *xtr;
if (!nlsk) {
kfree_skb(skb);
return -EPIPE;
}
xtr = xfrm_get_translator();
if (xtr) {
int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
xfrm_put_translator(xtr);
if (err) {
kfree_skb(skb);
return err;
}
}
return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}
static inline unsigned int xfrm_spdinfo_msgsize(void)
{
return NLMSG_ALIGN(4)
+ nla_total_size(sizeof(struct xfrmu_spdinfo))
+ nla_total_size(sizeof(struct xfrmu_spdhinfo))
+ nla_total_size(sizeof(struct xfrmu_spdhthresh)) /* IPv4 thresholds */
+ nla_total_size(sizeof(struct xfrmu_spdhthresh)); /* IPv6 thresholds */
}
static int build_spdinfo(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, u32 flags)
{
struct xfrmk_spdinfo si;
struct xfrmu_spdinfo spc;
struct xfrmu_spdhinfo sph;
struct xfrmu_spdhthresh spt4, spt6;
struct nlmsghdr *nlh;
int err;
u32 *f;
unsigned lseq;
nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
*f = flags;
xfrm_spd_getinfo(net, &si);
spc.incnt = si.incnt;
spc.outcnt = si.outcnt;
spc.fwdcnt = si.fwdcnt;
spc.inscnt = si.inscnt;
spc.outscnt = si.outscnt;
spc.fwdscnt = si.fwdscnt;
sph.spdhcnt = si.spdhcnt;
sph.spdhmcnt = si.spdhmcnt;
do {
lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
spt4.lbits = net->xfrm.policy_hthresh.lbits4;
spt4.rbits = net->xfrm.policy_hthresh.rbits4;
spt6.lbits = net->xfrm.policy_hthresh.lbits6;
spt6.rbits = net->xfrm.policy_hthresh.rbits6;
} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));
err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
if (!err)
err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
if (!err)
err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
if (!err)
err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrmu_spdhthresh *thresh4 = NULL;
struct xfrmu_spdhthresh *thresh6 = NULL;
/* selector prefixlen thresholds to hash policies */
if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];
if (nla_len(rta) < sizeof(*thresh4)) {
NL_SET_ERR_MSG(extack, "Invalid SPD_IPV4_HTHRESH attribute length");
return -EINVAL;
}
thresh4 = nla_data(rta);
if (thresh4->lbits > 32 || thresh4->rbits > 32) {
NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 32 for IPv4)");
return -EINVAL;
}
}
if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];
if (nla_len(rta) < sizeof(*thresh6)) {
NL_SET_ERR_MSG(extack, "Invalid SPD_IPV6_HTHRESH attribute length");
return -EINVAL;
}
thresh6 = nla_data(rta);
if (thresh6->lbits > 128 || thresh6->rbits > 128) {
NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 128 for IPv6)");
return -EINVAL;
}
}
if (thresh4 || thresh6) {
write_seqlock(&net->xfrm.policy_hthresh.lock);
if (thresh4) {
net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
}
if (thresh6) {
net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
}
write_sequnlock(&net->xfrm.policy_hthresh.lock);
xfrm_policy_hash_rebuild(net);
}
return 0;
}
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct sk_buff *r_skb;
u32 *flags = nlmsg_data(nlh);
u32 sportid = NETLINK_CB(skb).portid;
u32 seq = nlh->nlmsg_seq;
int err;
r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
if (r_skb == NULL)
return -ENOMEM;
err = build_spdinfo(r_skb, net, sportid, seq, *flags);
BUG_ON(err < 0);
return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}
static inline unsigned int xfrm_sadinfo_msgsize(void)
{
return NLMSG_ALIGN(4)
+ nla_total_size(sizeof(struct xfrmu_sadhinfo))
+ nla_total_size(4); /* XFRMA_SAD_CNT */
}
static int build_sadinfo(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, u32 flags)
{
struct xfrmk_sadinfo si;
struct xfrmu_sadhinfo sh;
struct nlmsghdr *nlh;
int err;
u32 *f;
nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
*f = flags;
xfrm_sad_getinfo(net, &si);
sh.sadhmcnt = si.sadhmcnt;
sh.sadhcnt = si.sadhcnt;
err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
if (!err)
err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct sk_buff *r_skb;
u32 *flags = nlmsg_data(nlh);
u32 sportid = NETLINK_CB(skb).portid;
u32 seq = nlh->nlmsg_seq;
int err;
r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
if (r_skb == NULL)
return -ENOMEM;
err = build_sadinfo(r_skb, net, sportid, seq, *flags);
BUG_ON(err < 0);
return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_usersa_id *p = nlmsg_data(nlh);
struct xfrm_state *x;
struct sk_buff *resp_skb;
int err = -ESRCH;
x = xfrm_user_state_lookup(net, p, attrs, &err);
if (x == NULL)
goto out_noput;
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
} else {
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
}
xfrm_state_put(x);
out_noput:
return err;
}
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct xfrm_userspi_info *p;
struct xfrm_translator *xtr;
struct sk_buff *resp_skb;
xfrm_address_t *daddr;
int family;
int err;
u32 mark;
struct xfrm_mark m;
u32 if_id = 0;
p = nlmsg_data(nlh);
err = verify_spi_info(p->info.id.proto, p->min, p->max, extack);
if (err)
goto out_noput;
family = p->info.family;
daddr = &p->info.id.daddr;
x = NULL;
mark = xfrm_mark_get(attrs, &m);
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
if (p->info.seq) {
x = xfrm_find_acq_byseq(net, mark, p->info.seq);
if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
xfrm_state_put(x);
x = NULL;
}
}
if (!x)
x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
if_id, p->info.id.proto, daddr,
&p->info.saddr, 1,
family);
err = -ENOENT;
if (!x) {
NL_SET_ERR_MSG(extack, "Target ACQUIRE not found");
goto out_noput;
}
err = xfrm_alloc_spi(x, p->min, p->max, extack);
if (err)
goto out;
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
goto out;
}
xtr = xfrm_get_translator();
if (xtr) {
err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
xfrm_put_translator(xtr);
if (err) {
kfree_skb(resp_skb);
goto out;
}
}
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
out:
xfrm_state_put(x);
out_noput:
return err;
}
static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack)
{
switch (dir) {
case XFRM_POLICY_IN:
case XFRM_POLICY_OUT:
case XFRM_POLICY_FWD:
break;
default:
NL_SET_ERR_MSG(extack, "Invalid policy direction");
return -EINVAL;
}
return 0;
}
static int verify_policy_type(u8 type, struct netlink_ext_ack *extack)
{
switch (type) {
case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
case XFRM_POLICY_TYPE_SUB:
#endif
break;
default:
NL_SET_ERR_MSG(extack, "Invalid policy type");
return -EINVAL;
}
return 0;
}
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p,
struct netlink_ext_ack *extack)
{
int ret;
switch (p->share) {
case XFRM_SHARE_ANY:
case XFRM_SHARE_SESSION:
case XFRM_SHARE_USER:
case XFRM_SHARE_UNIQUE:
break;
default:
NL_SET_ERR_MSG(extack, "Invalid policy share");
return -EINVAL;
}
switch (p->action) {
case XFRM_POLICY_ALLOW:
case XFRM_POLICY_BLOCK:
break;
default:
NL_SET_ERR_MSG(extack, "Invalid policy action");
return -EINVAL;
}
switch (p->sel.family) {
case AF_INET:
if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
return -EINVAL;
}
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
return -EINVAL;
}
break;
#else
NL_SET_ERR_MSG(extack, "IPv6 support disabled");
return -EAFNOSUPPORT;
#endif
default:
NL_SET_ERR_MSG(extack, "Invalid selector family");
return -EINVAL;
}
ret = verify_policy_dir(p->dir, extack);
if (ret)
return ret;
if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) {
NL_SET_ERR_MSG(extack, "Policy index doesn't match direction");
return -EINVAL;
}
return 0;
}
static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_user_sec_ctx *uctx;
if (!rt)
return 0;
uctx = nla_data(rt);
return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
}
static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
int nr)
{
int i;
xp->xfrm_nr = nr;
for (i = 0; i < nr; i++, ut++) {
struct xfrm_tmpl *t = &xp->xfrm_vec[i];
memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
memcpy(&t->saddr, &ut->saddr,
sizeof(xfrm_address_t));
t->reqid = ut->reqid;
t->mode = ut->mode;
t->share = ut->share;
t->optional = ut->optional;
t->aalgos = ut->aalgos;
t->ealgos = ut->ealgos;
t->calgos = ut->calgos;
/* If all masks are ~0, then we allow all algorithms. */
t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
t->encap_family = ut->family;
}
}
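/* Sanity-check the user-supplied templates: bounded depth, valid mode,
 * family and XFRM protocol for each template, and an address-family
 * change only across tunnel/BEET-mode templates.
 */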
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
int dir, struct netlink_ext_ack *extack)
{
u16 prev_family;
int i;
if (nr > XFRM_MAX_DEPTH) {
NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")");
return -EINVAL;
}
prev_family = family;
for (i = 0; i < nr; i++) {
/* We never validated the ut->family value, so many
* applications simply leave it at zero. The check was
* never made and ut->family was ignored because all
* templates could be assumed to have the same family as
* the policy itself. Now that we will have ipv4-in-ipv6
* and ipv6-in-ipv4 tunnels, this is no longer true.
*/
if (!ut[i].family)
ut[i].family = family;
switch (ut[i].mode) {
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
if (ut[i].optional && dir == XFRM_POLICY_OUT) {
NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
return -EINVAL;
}
break;
default:
if (ut[i].family != prev_family) {
NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change");
return -EINVAL;
}
break;
}
if (ut[i].mode >= XFRM_MODE_MAX) {
NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")");
return -EINVAL;
}
prev_family = ut[i].family;
switch (ut[i].family) {
case AF_INET:
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
break;
#endif
default:
NL_SET_ERR_MSG(extack, "Invalid family in template");
return -EINVAL;
}
if (!xfrm_id_proto_valid(ut[i].id.proto)) {
NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template");
return -EINVAL;
}
}
return 0;
}
static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
int dir, struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_TMPL];
if (!rt) {
pol->xfrm_nr = 0;
} else {
struct xfrm_user_tmpl *utmpl = nla_data(rt);
int nr = nla_len(rt) / sizeof(*utmpl);
int err;
err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
if (err)
return err;
copy_templates(pol, utmpl, nr);
}
return 0;
}
static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
struct xfrm_userpolicy_type *upt;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
if (rt) {
upt = nla_data(rt);
type = upt->type;
}
err = verify_policy_type(type, extack);
if (err)
return err;
*tp = type;
return 0;
}
static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
xp->priority = p->priority;
xp->index = p->index;
memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
xp->action = p->action;
xp->flags = p->flags;
xp->family = p->sel.family;
/* XXX xp->share = p->share; */
}
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
memset(p, 0, sizeof(*p));
memcpy(&p->sel, &xp->selector, sizeof(p->sel));
memcpy(&p->lft, &xp->lft, sizeof(p->lft));
memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
p->priority = xp->priority;
p->index = xp->index;
p->sel.family = xp->family;
p->dir = dir;
p->action = xp->action;
p->flags = xp->flags;
p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}
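/* Build a new xfrm_policy from a user policy request: copy the base
 * parameters, templates, security context, mark and interface id, and
 * set up policy offload if requested.
 */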
static struct xfrm_policy *xfrm_policy_construct(struct net *net,
struct xfrm_userpolicy_info *p,
struct nlattr **attrs,
int *errp,
struct netlink_ext_ack *extack)
{
struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
int err;
if (!xp) {
*errp = -ENOMEM;
return NULL;
}
copy_from_user_policy(xp, p);
err = copy_from_user_policy_type(&xp->type, attrs, extack);
if (err)
goto error;
if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
err = copy_from_user_sec_ctx(xp, attrs);
if (err)
goto error;
xfrm_mark_get(attrs, &xp->mark);
if (attrs[XFRMA_IF_ID])
xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
/* configure the hardware if offload is requested */
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_policy_add(net, xp,
nla_data(attrs[XFRMA_OFFLOAD_DEV]),
p->dir, extack);
if (err)
goto error;
}
return xp;
error:
*errp = err;
xp->walk.dead = 1;
xfrm_policy_destroy(xp);
return NULL;
}
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
struct xfrm_policy *xp;
struct km_event c;
int err;
int excl;
err = verify_newpolicy_info(p, extack);
if (err)
return err;
err = verify_sec_ctx_len(attrs, extack);
if (err)
return err;
xp = xfrm_policy_construct(net, p, attrs, &err, extack);
if (!xp)
return err;
/* Shouldn't excl be based on nlh flags?  This is really more
 * pfkey-derived than netlink-style: with netlink semantics, excl
 * would be a flag and a separate XFRM_MSG_UPDPOLICY type wouldn't
 * be needed. - JHS */
excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
err = xfrm_policy_insert(p->dir, xp, excl);
xfrm_audit_policy_add(xp, err ? 0 : 1, true);
if (err) {
xfrm_dev_policy_delete(xp);
xfrm_dev_policy_free(xp);
security_xfrm_policy_free(xp->security);
kfree(xp);
return err;
}
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
km_policy_notify(xp, p->dir, &c);
xfrm_pol_put(xp);
return 0;
}
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
int i;
if (xp->xfrm_nr == 0)
return 0;
for (i = 0; i < xp->xfrm_nr; i++) {
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
memset(up, 0, sizeof(*up));
memcpy(&up->id, &kp->id, sizeof(up->id));
up->family = kp->encap_family;
memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
up->reqid = kp->reqid;
up->mode = kp->mode;
up->share = kp->share;
up->optional = kp->optional;
up->aalgos = kp->aalgos;
up->ealgos = kp->ealgos;
up->calgos = kp->calgos;
}
return nla_put(skb, XFRMA_TMPL,
sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}
static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
if (x->security) {
return copy_sec_ctx(x->security, skb);
}
return 0;
}
static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
if (xp->security)
return copy_sec_ctx(xp->security, skb);
return 0;
}
static inline unsigned int userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
return 0;
#endif
}
#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
struct xfrm_userpolicy_type upt;
/* Sadly there are two holes in struct xfrm_userpolicy_type */
memset(&upt, 0, sizeof(upt));
upt.type = type;
return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}
#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
return 0;
}
#endif
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
struct xfrm_dump_info *sp = ptr;
struct xfrm_userpolicy_info *p;
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
struct xfrm_translator *xtr;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
if (nlh == NULL)
return -EMSGSIZE;
p = nlmsg_data(nlh);
copy_to_user_policy(xp, p, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_sec_ctx(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (!err)
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
xtr = xfrm_get_translator();
if (xtr) {
err = xtr->alloc_compat(skb, nlh);
xfrm_put_translator(xtr);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
}
return 0;
}
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct net *net = sock_net(cb->skb->sk);
xfrm_policy_walk_done(walk, net);
return 0;
}
static int xfrm_dump_policy_start(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
return 0;
}
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct xfrm_dump_info info;
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
return skb->len;
}
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
struct xfrm_policy *xp,
int dir, u32 seq)
{
struct xfrm_dump_info info;
struct sk_buff *skb;
int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
err = dump_one_policy(xp, dir, 0, &info);
if (err) {
kfree_skb(skb);
return ERR_PTR(err);
}
return skb;
}
static int xfrm_notify_userpolicy(struct net *net)
{
struct xfrm_userpolicy_default *up;
int len = NLMSG_ALIGN(sizeof(*up));
struct nlmsghdr *nlh;
struct sk_buff *skb;
int err;
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
if (nlh == NULL) {
kfree_skb(skb);
return -EMSGSIZE;
}
up = nlmsg_data(nlh);
up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
nlmsg_end(skb, nlh);
rcu_read_lock();
err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
rcu_read_unlock();
return err;
}
static bool xfrm_userpolicy_is_valid(__u8 policy)
{
return policy == XFRM_USERPOLICY_BLOCK ||
policy == XFRM_USERPOLICY_ACCEPT;
}
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
if (xfrm_userpolicy_is_valid(up->in))
net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;
if (xfrm_userpolicy_is_valid(up->fwd))
net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;
if (xfrm_userpolicy_is_valid(up->out))
net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;
rt_genid_bump_all(net);
xfrm_notify_userpolicy(net);
return 0;
}
static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct sk_buff *r_skb;
struct nlmsghdr *r_nlh;
struct net *net = sock_net(skb->sk);
struct xfrm_userpolicy_default *r_up;
int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
u32 portid = NETLINK_CB(skb).portid;
u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(len, GFP_ATOMIC);
if (!r_skb)
return -ENOMEM;
r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
if (!r_nlh) {
kfree_skb(r_skb);
return -EMSGSIZE;
}
r_up = nlmsg_data(r_nlh);
r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
nlmsg_end(r_skb, r_nlh);
return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
}
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_userpolicy_id *p;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
struct km_event c;
int delete;
struct xfrm_mark m;
u32 if_id = 0;
p = nlmsg_data(nlh);
delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
err = copy_from_user_policy_type(&type, attrs, extack);
if (err)
return err;
err = verify_policy_dir(p->dir, extack);
if (err)
return err;
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
xfrm_mark_get(attrs, &m);
if (p->index)
xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
p->index, delete, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
err = verify_sec_ctx_len(attrs, extack);
if (err)
return err;
ctx = NULL;
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
&p->sel, ctx, delete, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
return -ENOENT;
if (!delete) {
struct sk_buff *resp_skb;
resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
} else {
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
NETLINK_CB(skb).portid);
}
} else {
xfrm_dev_policy_delete(xp);
xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
if (err != 0)
goto out;
c.data.byid = p->index;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
km_policy_notify(xp, p->dir, &c);
}
out:
xfrm_pol_put(xp);
return err;
}
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct km_event c;
struct xfrm_usersa_flush *p = nlmsg_data(nlh);
int err;
err = xfrm_state_flush(net, p->proto, true, false);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
return err;
}
c.data.proto = p->proto;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.net = net;
km_state_notify(NULL, &c);
return 0;
}
static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
{
unsigned int replay_size = x->replay_esn ?
xfrm_replay_state_esn_len(x->replay_esn) :
sizeof(struct xfrm_replay_state);
return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
+ nla_total_size(replay_size)
+ nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(4) /* XFRM_AE_RTHR */
+ nla_total_size(4); /* XFRM_AE_ETHR */
}
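/* Fill an XFRM_MSG_NEWAE message with the SA's replay state (ESN or
 * legacy), its current lifetime and, when requested via the flags, the
 * replay and expiry thresholds.
 */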
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_aevent_id *id;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
if (nlh == NULL)
return -EMSGSIZE;
id = nlmsg_data(nlh);
memset(&id->sa_id, 0, sizeof(id->sa_id));
memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
id->sa_id.spi = x->id.spi;
id->sa_id.family = x->props.family;
id->sa_id.proto = x->id.proto;
memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
id->reqid = x->props.reqid;
id->flags = c->data.aevent;
if (x->replay_esn) {
err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
xfrm_replay_state_esn_len(x->replay_esn),
x->replay_esn);
} else {
err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
&x->replay);
}
if (err)
goto out_cancel;
err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
XFRMA_PAD);
if (err)
goto out_cancel;
if (id->flags & XFRM_AE_RTHR) {
err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
if (err)
goto out_cancel;
}
if (id->flags & XFRM_AE_ETHR) {
err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
x->replay_maxage * 10 / HZ);
if (err)
goto out_cancel;
}
err = xfrm_mark_put(skb, &x->mark);
if (err)
goto out_cancel;
err = xfrm_if_id_put(skb, x->if_id);
if (err)
goto out_cancel;
nlmsg_end(skb, nlh);
return 0;
out_cancel:
nlmsg_cancel(skb, nlh);
return err;
}
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct sk_buff *r_skb;
int err;
struct km_event c;
u32 mark;
struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct xfrm_usersa_id *id = &p->sa_id;
mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
if (x == NULL)
return -ESRCH;
r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
if (r_skb == NULL) {
xfrm_state_put(x);
return -ENOMEM;
}
/*
 * XXX: is this lock really needed?  None of the other get paths take it
 * (the concern is the state being updated while we are still reading
 * it). - jhs
 */
spin_lock_bh(&x->lock);
c.data.aevent = p->flags;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
err = build_aevent(r_skb, x, &c);
BUG_ON(err < 0);
err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return err;
}
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct km_event c;
int err = -EINVAL;
u32 mark = 0;
struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
if (!lt && !rp && !re && !et && !rt) {
NL_SET_ERR_MSG(extack, "Missing required attribute for AE");
return err;
}
/* pedantic mode - thou shalt sayeth replaceth */
if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG(extack, "NLM_F_REPLACE flag is required");
return err;
}
mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
if (x == NULL)
return -ESRCH;
if (x->km.state != XFRM_STATE_VALID) {
NL_SET_ERR_MSG(extack, "SA must be in VALID state");
goto out;
}
err = xfrm_replay_verify_len(x->replay_esn, re, extack);
if (err)
goto out;
spin_lock_bh(&x->lock);
xfrm_update_ae_params(x, attrs, 1);
spin_unlock_bh(&x->lock);
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.data.aevent = XFRM_AE_CU;
km_state_notify(x, &c);
err = 0;
out:
xfrm_state_put(x);
return err;
}
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct km_event c;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
err = copy_from_user_policy_type(&type, attrs, extack);
if (err)
return err;
err = xfrm_policy_flush(net, type, true);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
return err;
}
c.data.type = type;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.net = net;
km_policy_notify(NULL, 0, &c);
return 0;
}
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_user_polexpire *up = nlmsg_data(nlh);
struct xfrm_userpolicy_info *p = &up->pol;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err = -ENOENT;
struct xfrm_mark m;
u32 if_id = 0;
err = copy_from_user_policy_type(&type, attrs, extack);
if (err)
return err;
err = verify_policy_dir(p->dir, extack);
if (err)
return err;
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
xfrm_mark_get(attrs, &m);
if (p->index)
xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
0, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
err = verify_sec_ctx_len(attrs, extack);
if (err)
return err;
ctx = NULL;
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
&p->sel, ctx, 0, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
return -ENOENT;
if (unlikely(xp->walk.dead))
goto out;
err = 0;
if (up->hard) {
xfrm_policy_delete(xp, p->dir);
xfrm_audit_policy_delete(xp, 1, true);
}
km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
out:
xfrm_pol_put(xp);
return err;
}
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
int err;
struct xfrm_user_expire *ue = nlmsg_data(nlh);
struct xfrm_usersa_info *p = &ue->state;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
err = -ENOENT;
if (x == NULL)
return err;
spin_lock_bh(&x->lock);
err = -EINVAL;
if (x->km.state != XFRM_STATE_VALID) {
NL_SET_ERR_MSG(extack, "SA must be in VALID state");
goto out;
}
km_state_expired(x, ue->hard, nlh->nlmsg_pid);
if (ue->hard) {
__xfrm_state_delete(x);
xfrm_audit_state_delete(x, 1, true);
}
err = 0;
out:
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return err;
}
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_user_tmpl *ut;
int i;
struct nlattr *rt = attrs[XFRMA_TMPL];
struct xfrm_mark mark;
struct xfrm_user_acquire *ua = nlmsg_data(nlh);
struct xfrm_state *x = xfrm_state_alloc(net);
int err = -ENOMEM;
if (!x)
goto nomem;
xfrm_mark_get(attrs, &mark);
err = verify_newpolicy_info(&ua->policy, extack);
if (err)
goto free_state;
err = verify_sec_ctx_len(attrs, extack);
if (err)
goto free_state;
/* build an XP */
xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack);
if (!xp)
goto free_state;
memcpy(&x->id, &ua->id, sizeof(ua->id));
memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
xp->mark.m = x->mark.m = mark.m;
xp->mark.v = x->mark.v = mark.v;
ut = nla_data(rt);
/* extract the templates and call km_query() for each */
for (i = 0; i < xp->xfrm_nr; i++, ut++) {
struct xfrm_tmpl *t = &xp->xfrm_vec[i];
memcpy(&x->id, &t->id, sizeof(x->id));
x->props.mode = t->mode;
x->props.reqid = t->reqid;
x->props.family = ut->family;
t->aalgos = ua->aalgos;
t->ealgos = ua->ealgos;
t->calgos = ua->calgos;
err = km_query(x, t, xp);
}
xfrm_state_free(x);
kfree(xp);
return 0;
free_state:
xfrm_state_free(x);
nomem:
return err;
}
#ifdef CONFIG_XFRM_MIGRATE
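/* Parse XFRMA_MIGRATE (and the optional XFRMA_KMADDRESS) into kernel
 * xfrm_migrate entries; at most XFRM_MAX_DEPTH migrations are accepted.
 */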
static int copy_from_user_migrate(struct xfrm_migrate *ma,
struct xfrm_kmaddress *k,
struct nlattr **attrs, int *num,
struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_MIGRATE];
struct xfrm_user_migrate *um;
int i, num_migrate;
if (k != NULL) {
struct xfrm_user_kmaddress *uk;
uk = nla_data(attrs[XFRMA_KMADDRESS]);
memcpy(&k->local, &uk->local, sizeof(k->local));
memcpy(&k->remote, &uk->remote, sizeof(k->remote));
k->family = uk->family;
k->reserved = uk->reserved;
}
um = nla_data(rt);
num_migrate = nla_len(rt) / sizeof(*um);
if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) {
NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
return -EINVAL;
}
for (i = 0; i < num_migrate; i++, um++, ma++) {
memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
ma->proto = um->proto;
ma->mode = um->mode;
ma->reqid = um->reqid;
ma->old_family = um->old_family;
ma->new_family = um->new_family;
}
*num = i;
return 0;
}
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
struct xfrm_migrate m[XFRM_MAX_DEPTH];
struct xfrm_kmaddress km, *kmp;
u8 type;
int err;
int n = 0;
struct net *net = sock_net(skb->sk);
struct xfrm_encap_tmpl *encap = NULL;
u32 if_id = 0;
if (!attrs[XFRMA_MIGRATE]) {
NL_SET_ERR_MSG(extack, "Missing required MIGRATE attribute");
return -EINVAL;
}
kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
err = copy_from_user_policy_type(&type, attrs, extack);
if (err)
return err;
err = copy_from_user_migrate(m, kmp, attrs, &n, extack);
if (err)
return err;
if (!n)
return 0;
if (attrs[XFRMA_ENCAP]) {
encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
sizeof(*encap), GFP_KERNEL);
if (!encap)
return -ENOMEM;
}
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap,
if_id, extack);
kfree(encap);
return err;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs, struct netlink_ext_ack *extack)
{
return -ENOPROTOOPT;
}
#endif
#ifdef CONFIG_XFRM_MIGRATE
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
struct xfrm_user_migrate um;
memset(&um, 0, sizeof(um));
um.proto = m->proto;
um.mode = m->mode;
um.reqid = m->reqid;
um.old_family = m->old_family;
memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
um.new_family = m->new_family;
memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}
static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
struct xfrm_user_kmaddress uk;
memset(&uk, 0, sizeof(uk));
uk.family = k->family;
uk.reserved = k->reserved;
memcpy(&uk.local, &k->local, sizeof(uk.local));
memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}
static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma,
int with_encp)
{
return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
+ (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
+ (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
+ nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
+ userpolicy_type_attrsize();
}
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
int num_migrate, const struct xfrm_kmaddress *k,
const struct xfrm_selector *sel,
const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
{
const struct xfrm_migrate *mp;
struct xfrm_userpolicy_id *pol_id;
struct nlmsghdr *nlh;
int i, err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
if (nlh == NULL)
return -EMSGSIZE;
pol_id = nlmsg_data(nlh);
/* copy data from selector, dir, and type to the pol_id */
memset(pol_id, 0, sizeof(*pol_id));
memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
pol_id->dir = dir;
if (k != NULL) {
err = copy_to_user_kmaddress(k, skb);
if (err)
goto out_cancel;
}
if (encap) {
err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
if (err)
goto out_cancel;
}
err = copy_to_user_policy_type(type, skb);
if (err)
goto out_cancel;
for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
err = copy_to_user_migrate(mp, skb);
if (err)
goto out_cancel;
}
nlmsg_end(skb, nlh);
return 0;
out_cancel:
nlmsg_cancel(skb, nlh);
return err;
}
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_migrate,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap)
{
struct net *net = &init_net;
struct sk_buff *skb;
int err;
skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
/* build migrate */
err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
BUG_ON(err < 0);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
}
#else
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_migrate,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap)
{
return -ENOPROTOOPT;
}
#endif
#define XMSGSIZE(type) sizeof(struct type)
const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
[XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
};
EXPORT_SYMBOL_GPL(xfrm_msg_min);
#undef XMSGSIZE
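/* Validation policy for XFRM netlink attributes: structured attributes
 * are checked against the expected structure size, scalars against
 * their NLA type.
 */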
const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
[XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
[XFRMA_LASTUSED] = { .type = NLA_U64},
[XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
[XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
[XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
[XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
[XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
[XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
[XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
[XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
[XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
[XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
[XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
[XFRMA_TFCPAD] = { .type = NLA_U32 },
[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
[XFRMA_PROTO] = { .type = NLA_U8 },
[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
[XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
[XFRMA_SET_MARK] = { .type = NLA_U32 },
[XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
[XFRMA_IF_ID] = { .type = NLA_U32 },
[XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(xfrma_policy);
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
[XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
};
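/* Per-message-type dispatch table: .doit handles a request, .start/.dump/.done
 * implement dumps, and .nla_pol/.nla_max optionally override the default
 * attribute policy.
 */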
static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **,
struct netlink_ext_ack *);
int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
const struct nla_policy *nla_pol;
int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
.dump = xfrm_dump_sa,
.done = xfrm_dump_sa_done },
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
.start = xfrm_dump_policy_start,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
[XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
.nla_pol = xfrma_spd_policy,
.nla_max = XFRMA_SPD_MAX },
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
[XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_set_default },
[XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_get_default },
};
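/* Handle one xfrm netlink request: all operations require CAP_NET_ADMIN;
 * compat (32-bit) messages are translated first, GETSA/GETPOLICY with
 * NLM_F_DUMP start a dump, everything else is parsed and passed to the
 * per-type doit handler.
 */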
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *attrs[XFRMA_MAX+1];
const struct xfrm_link *link;
struct nlmsghdr *nlh64 = NULL;
int type, err;
type = nlh->nlmsg_type;
if (type > XFRM_MSG_MAX)
return -EINVAL;
type -= XFRM_MSG_BASE;
link = &xfrm_dispatch[type];
/* All operations require privileges, even GET */
if (!netlink_net_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (in_compat_syscall()) {
struct xfrm_translator *xtr = xfrm_get_translator();
if (!xtr)
return -EOPNOTSUPP;
nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
link->nla_pol, extack);
xfrm_put_translator(xtr);
if (IS_ERR(nlh64))
return PTR_ERR(nlh64);
if (nlh64)
nlh = nlh64;
}
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
(nlh->nlmsg_flags & NLM_F_DUMP)) {
struct netlink_dump_control c = {
.start = link->start,
.dump = link->dump,
.done = link->done,
};
if (link->dump == NULL) {
err = -EINVAL;
goto err;
}
err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
goto err;
}
err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
link->nla_max ? : XFRMA_MAX,
link->nla_pol ? : xfrma_policy, extack);
if (err < 0)
goto err;
if (link->doit == NULL) {
err = -EINVAL;
goto err;
}
err = link->doit(skb, nlh, attrs, extack);
/* We need to free skb allocated in xfrm_alloc_compat() before
* returning from this function, because consume_skb() won't take
* care of frag_list since netlink destructor sets
* skb->head to NULL. (see netlink_skb_destructor())
*/
if (skb_has_frag_list(skb)) {
kfree_skb(skb_shinfo(skb)->frag_list);
skb_shinfo(skb)->frag_list = NULL;
}
err:
kvfree(nlh64);
return err;
}
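/* xfrm configuration requests are serialized under the per-netns xfrm_cfg_mutex. */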
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
mutex_lock(&net->xfrm.xfrm_cfg_mutex);
netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}
static inline unsigned int xfrm_expire_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
+ nla_total_size(sizeof(struct xfrm_mark));
}
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_user_expire *ue;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
if (nlh == NULL)
return -EMSGSIZE;
ue = nlmsg_data(nlh);
copy_to_user_state(x, &ue->state);
ue->hard = (c->data.hard != 0) ? 1 : 0;
/* clear the padding bytes */
memset_after(ue, 0, hard);
err = xfrm_mark_put(skb, &x->mark);
if (err)
return err;
err = xfrm_if_id_put(skb, x->if_id);
if (err)
return err;
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_expire(skb, x, c) < 0) {
kfree_skb(skb);
return -EMSGSIZE;
}
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}
static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
int err;
skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
err = build_aevent(skb, x, c);
BUG_ON(err < 0);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
}
static int xfrm_notify_sa_flush(const struct km_event *c)
{
struct net *net = c->net;
struct xfrm_usersa_flush *p;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
if (nlh == NULL) {
kfree_skb(skb);
return -EMSGSIZE;
}
p = nlmsg_data(nlh);
p->proto = c->data.proto;
nlmsg_end(skb, nlh);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
}
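/* Attribute space needed to report state x, excluding the fixed
 * xfrm_usersa_info/xfrm_usersa_id header.
 */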
static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
{
unsigned int l = 0;
if (x->aead)
l += nla_total_size(aead_len(x->aead));
if (x->aalg) {
l += nla_total_size(sizeof(struct xfrm_algo) +
(x->aalg->alg_key_len + 7) / 8);
l += nla_total_size(xfrm_alg_auth_len(x->aalg));
}
if (x->ealg)
l += nla_total_size(xfrm_alg_len(x->ealg));
if (x->calg)
l += nla_total_size(sizeof(*x->calg));
if (x->encap)
l += nla_total_size(sizeof(*x->encap));
if (x->tfcpad)
l += nla_total_size(sizeof(x->tfcpad));
if (x->replay_esn)
l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
else
l += nla_total_size(sizeof(struct xfrm_replay_state));
if (x->security)
l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
x->security->ctx_len);
if (x->coaddr)
l += nla_total_size(sizeof(*x->coaddr));
if (x->props.extra_flags)
l += nla_total_size(sizeof(x->props.extra_flags));
if (x->xso.dev)
l += nla_total_size(sizeof(struct xfrm_user_offload));
if (x->props.smark.v | x->props.smark.m) {
l += nla_total_size(sizeof(x->props.smark.v));
l += nla_total_size(sizeof(x->props.smark.m));
}
if (x->if_id)
l += nla_total_size(sizeof(x->if_id));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size_64bit(sizeof(u64));
if (x->mapping_maxage)
l += nla_total_size(sizeof(x->mapping_maxage));
return l;
}
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct xfrm_usersa_info *p;
struct xfrm_usersa_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
unsigned int len = xfrm_sa_len(x);
unsigned int headlen;
int err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELSA) {
len += nla_total_size(headlen);
headlen = sizeof(*id);
len += nla_total_size(sizeof(struct xfrm_mark));
}
len += NLMSG_ALIGN(headlen);
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELSA) {
struct nlattr *attr;
id = nlmsg_data(nlh);
memset(id, 0, sizeof(*id));
memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
id->spi = x->id.spi;
id->family = x->props.family;
id->proto = x->id.proto;
attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
err = -EMSGSIZE;
if (attr == NULL)
goto out_free_skb;
p = nla_data(attr);
}
err = copy_to_user_state_extra(x, p, skb);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{
switch (c->event) {
case XFRM_MSG_EXPIRE:
return xfrm_exp_state_notify(x, c);
case XFRM_MSG_NEWAE:
return xfrm_aevent_state_notify(x, c);
case XFRM_MSG_DELSA:
case XFRM_MSG_UPDSA:
case XFRM_MSG_NEWSA:
return xfrm_notify_sa(x, c);
case XFRM_MSG_FLUSHSA:
return xfrm_notify_sa_flush(c);
default:
printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
c->event);
break;
}
return 0;
}
static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
struct xfrm_policy *xp)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(xfrm_user_sec_ctx_size(x->security))
+ userpolicy_type_attrsize();
}
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_tmpl *xt, struct xfrm_policy *xp)
{
__u32 seq = xfrm_get_acqseq();
struct xfrm_user_acquire *ua;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
if (nlh == NULL)
return -EMSGSIZE;
ua = nlmsg_data(nlh);
memcpy(&ua->id, &x->id, sizeof(ua->id));
memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
ua->aalgos = xt->aalgos;
ua->ealgos = xt->ealgos;
ua->calgos = xt->calgos;
ua->seq = x->km.seq = seq;
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_state_sec_ctx(x, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (!err)
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
struct xfrm_policy *xp)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
int err;
skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
err = build_acquire(skb, x, xt, xp);
BUG_ON(err < 0);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
}
/* User gives us xfrm_userpolicy_info followed by an array of 0
* or more templates.
*/
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
u8 *data, int len, int *dir)
{
struct net *net = sock_net(sk);
struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
struct xfrm_policy *xp;
int nr;
switch (sk->sk_family) {
case AF_INET:
if (opt != IP_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (opt != IPV6_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#endif
default:
*dir = -EINVAL;
return NULL;
}
*dir = -EINVAL;
if (len < sizeof(*p) ||
verify_newpolicy_info(p, NULL))
return NULL;
nr = ((len - sizeof(*p)) / sizeof(*ut));
if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL))
return NULL;
if (p->dir > XFRM_POLICY_OUT)
return NULL;
xp = xfrm_policy_alloc(net, GFP_ATOMIC);
if (xp == NULL) {
*dir = -ENOBUFS;
return NULL;
}
copy_from_user_policy(xp, p);
xp->type = XFRM_POLICY_TYPE_MAIN;
copy_templates(xp, ut, nr);
*dir = p->dir;
return xp;
}
static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ nla_total_size(xfrm_user_sec_ctx_size(xp->security))
+ nla_total_size(sizeof(struct xfrm_mark))
+ userpolicy_type_attrsize();
}
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
int dir, const struct km_event *c)
{
struct xfrm_user_polexpire *upe;
int hard = c->data.hard;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
if (nlh == NULL)
return -EMSGSIZE;
upe = nlmsg_data(nlh);
copy_to_user_policy(xp, &upe->pol, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_sec_ctx(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (!err)
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
upe->hard = !!hard;
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct net *net = xp_net(xp);
struct sk_buff *skb;
int err;
skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
err = build_polexpire(skb, xp, dir, c);
BUG_ON(err < 0);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
struct net *net = xp_net(xp);
struct xfrm_userpolicy_info *p;
struct xfrm_userpolicy_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
unsigned int headlen;
int err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELPOLICY) {
len += nla_total_size(headlen);
headlen = sizeof(*id);
}
len += userpolicy_type_attrsize();
len += nla_total_size(sizeof(struct xfrm_mark));
len += NLMSG_ALIGN(headlen);
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELPOLICY) {
struct nlattr *attr;
id = nlmsg_data(nlh);
memset(id, 0, sizeof(*id));
id->dir = dir;
if (c->data.byid)
id->index = xp->index;
else
memcpy(&id->sel, &xp->selector, sizeof(id->sel));
attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
err = -EMSGSIZE;
if (attr == NULL)
goto out_free_skb;
p = nla_data(attr);
}
copy_to_user_policy(xp, p, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (!err)
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_notify_policy_flush(const struct km_event *c)
{
struct net *net = c->net;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int err;
skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
err = copy_to_user_policy_type(c->data.type, skb);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
switch (c->event) {
case XFRM_MSG_NEWPOLICY:
case XFRM_MSG_UPDPOLICY:
case XFRM_MSG_DELPOLICY:
return xfrm_notify_policy(xp, dir, c);
case XFRM_MSG_FLUSHPOLICY:
return xfrm_notify_policy_flush(c);
case XFRM_MSG_POLEXPIRE:
return xfrm_exp_policy_notify(xp, dir, c);
default:
printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
c->event);
}
return 0;
}
static inline unsigned int xfrm_report_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}
static int build_report(struct sk_buff *skb, u8 proto,
struct xfrm_selector *sel, xfrm_address_t *addr)
{
struct xfrm_user_report *ur;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
if (nlh == NULL)
return -EMSGSIZE;
ur = nlmsg_data(nlh);
ur->proto = proto;
memcpy(&ur->sel, sel, sizeof(ur->sel));
if (addr) {
int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_send_report(struct net *net, u8 proto,
struct xfrm_selector *sel, xfrm_address_t *addr)
{
struct sk_buff *skb;
int err;
skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
err = build_report(skb, proto, sel, addr);
BUG_ON(err < 0);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
}
static inline unsigned int xfrm_mapping_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}
static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
xfrm_address_t *new_saddr, __be16 new_sport)
{
struct xfrm_user_mapping *um;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
if (nlh == NULL)
return -EMSGSIZE;
um = nlmsg_data(nlh);
memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
um->id.spi = x->id.spi;
um->id.family = x->props.family;
um->id.proto = x->id.proto;
memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
um->new_sport = new_sport;
um->old_sport = x->encap->encap_sport;
um->reqid = x->props.reqid;
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
__be16 sport)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
int err;
if (x->id.proto != IPPROTO_ESP)
return -EINVAL;
if (!x->encap)
return -EINVAL;
skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
err = build_mapping(skb, x, ipaddr, sport);
BUG_ON(err < 0);
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
}
static bool xfrm_is_alive(const struct km_event *c)
{
return (bool)xfrm_acquire_is_on(c->net);
}
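/* Key manager callbacks backing the netlink (xfrm_user) interface,
 * registered with the xfrm core at module init.
 */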
static struct xfrm_mgr netlink_mgr = {
.notify = xfrm_send_state_notify,
.acquire = xfrm_send_acquire,
.compile_policy = xfrm_compile_policy,
.notify_policy = xfrm_send_policy_notify,
.report = xfrm_send_report,
.migrate = xfrm_send_migrate,
.new_mapping = xfrm_send_mapping,
.is_alive = xfrm_is_alive,
};
static int __net_init xfrm_user_net_init(struct net *net)
{
struct sock *nlsk;
struct netlink_kernel_cfg cfg = {
.groups = XFRMNLGRP_MAX,
.input = xfrm_netlink_rcv,
};
nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
rcu_assign_pointer(net->xfrm.nlsk, nlsk);
return 0;
}
static void __net_exit xfrm_user_net_pre_exit(struct net *net)
{
RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
}
static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list)
netlink_kernel_release(net->xfrm.nlsk_stash);
}
static struct pernet_operations xfrm_user_net_ops = {
.init = xfrm_user_net_init,
.pre_exit = xfrm_user_net_pre_exit,
.exit_batch = xfrm_user_net_exit,
};
static int __init xfrm_user_init(void)
{
int rv;
printk(KERN_INFO "Initializing XFRM netlink socket\n");
rv = register_pernet_subsys(&xfrm_user_net_ops);
if (rv < 0)
return rv;
xfrm_register_km(&netlink_mgr);
return 0;
}
static void __exit xfrm_user_exit(void)
{
xfrm_unregister_km(&netlink_mgr);
unregister_pernet_subsys(&xfrm_user_net_ops);
}
module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
| linux-master | net/xfrm/xfrm_user.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xfrm_replay.c - xfrm replay detection, derived from xfrm_state.c.
*
* Copyright (C) 2010 secunet Security Networks AG
* Copyright (C) 2010 Steffen Klassert <[email protected]>
*/
#include <linux/export.h>
#include <net/xfrm.h>
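/* Reconstruct the upper 32 bits (seq_hi) of an ESN sequence number from the
 * 32 bits carried on the wire, relative to the current replay window.
 */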
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
{
u32 seq, seq_hi, bottom;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
if (!(x->props.flags & XFRM_STATE_ESN))
return 0;
seq = ntohl(net_seq);
seq_hi = replay_esn->seq_hi;
bottom = replay_esn->seq - replay_esn->replay_window + 1;
if (likely(replay_esn->seq >= replay_esn->replay_window - 1)) {
/* A. same subspace */
if (unlikely(seq < bottom))
seq_hi++;
} else {
/* B. window spans two subspaces */
if (unlikely(seq >= bottom))
seq_hi--;
}
return seq_hi;
}
EXPORT_SYMBOL(xfrm_replay_seqhi);
static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event);
static void xfrm_replay_notify_esn(struct xfrm_state *x, int event);
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
struct km_event c;
/* we send notify messages in case
* 1. we updated one of the sequence numbers, and the seqno difference
* is at least x->replay_maxdiff, in this case we also update the
* timeout of our timer function
* 2. if x->replay_maxage has elapsed since last update,
* and there were changes
*
* The state structure must be locked!
*/
switch (x->repl_mode) {
case XFRM_REPLAY_MODE_LEGACY:
break;
case XFRM_REPLAY_MODE_BMP:
xfrm_replay_notify_bmp(x, event);
return;
case XFRM_REPLAY_MODE_ESN:
xfrm_replay_notify_esn(x, event);
return;
}
switch (event) {
case XFRM_REPLAY_UPDATE:
if (!x->replay_maxdiff ||
((x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
(x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) {
if (x->xflags & XFRM_TIME_DEFER)
event = XFRM_REPLAY_TIMEOUT;
else
return;
}
break;
case XFRM_REPLAY_TIMEOUT:
if (memcmp(&x->replay, &x->preplay,
sizeof(struct xfrm_replay_state)) == 0) {
x->xflags |= XFRM_TIME_DEFER;
return;
}
break;
}
memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
c.event = XFRM_MSG_NEWAE;
c.data.aevent = event;
km_state_notify(x, &c);
if (x->replay_maxage &&
!mod_timer(&x->rtimer, jiffies + x->replay_maxage))
x->xflags &= ~XFRM_TIME_DEFER;
}
static int __xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct net *net = xs_net(x);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
XFRM_SKB_CB(skb)->seq.output.hi = 0;
if (unlikely(x->replay.oseq == 0) &&
!(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) {
x->replay.oseq--;
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
}
if (xfrm_aevent_is_on(net))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
static int xfrm_replay_check_legacy(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
u32 diff;
u32 seq = ntohl(net_seq);
if (!x->props.replay_window)
return 0;
if (unlikely(seq == 0))
goto err;
if (likely(seq > x->replay.seq))
return 0;
diff = x->replay.seq - seq;
if (diff >= x->props.replay_window) {
x->stats.replay_window++;
goto err;
}
if (x->replay.bitmap & (1U << diff)) {
x->stats.replay++;
goto err;
}
return 0;
err:
xfrm_audit_state_replay(x, skb, net_seq);
return -EINVAL;
}
static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq);
static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq);
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
u32 diff, seq;
switch (x->repl_mode) {
case XFRM_REPLAY_MODE_LEGACY:
break;
case XFRM_REPLAY_MODE_BMP:
return xfrm_replay_advance_bmp(x, net_seq);
case XFRM_REPLAY_MODE_ESN:
return xfrm_replay_advance_esn(x, net_seq);
}
if (!x->props.replay_window)
return;
seq = ntohl(net_seq);
if (seq > x->replay.seq) {
diff = seq - x->replay.seq;
if (diff < x->props.replay_window)
x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
else
x->replay.bitmap = 1;
x->replay.seq = seq;
} else {
diff = x->replay.seq - seq;
x->replay.bitmap |= (1U << diff);
}
if (xfrm_aevent_is_on(xs_net(x)))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
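/* Bitmap mode: the replay window is tracked in the multi-word bitmap of
 * xfrm_replay_state_esn, without extended (64-bit) sequence numbers.
 */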
static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct net *net = xs_net(x);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
XFRM_SKB_CB(skb)->seq.output.hi = 0;
if (unlikely(replay_esn->oseq == 0) &&
!(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) {
replay_esn->oseq--;
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
}
if (xfrm_aevent_is_on(net))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
static int xfrm_replay_check_bmp(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
unsigned int bitnr, nr;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
u32 pos;
u32 seq = ntohl(net_seq);
u32 diff = replay_esn->seq - seq;
if (!replay_esn->replay_window)
return 0;
if (unlikely(seq == 0))
goto err;
if (likely(seq > replay_esn->seq))
return 0;
if (diff >= replay_esn->replay_window) {
x->stats.replay_window++;
goto err;
}
pos = (replay_esn->seq - 1) % replay_esn->replay_window;
if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
else
bitnr = replay_esn->replay_window - (diff - pos);
nr = bitnr >> 5;
bitnr = bitnr & 0x1F;
if (replay_esn->bmp[nr] & (1U << bitnr))
goto err_replay;
return 0;
err_replay:
x->stats.replay++;
err:
xfrm_audit_state_replay(x, skb, net_seq);
return -EINVAL;
}
static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
{
unsigned int bitnr, nr, i;
u32 diff;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
u32 seq = ntohl(net_seq);
u32 pos;
if (!replay_esn->replay_window)
return;
pos = (replay_esn->seq - 1) % replay_esn->replay_window;
if (seq > replay_esn->seq) {
diff = seq - replay_esn->seq;
if (diff < replay_esn->replay_window) {
for (i = 1; i < diff; i++) {
bitnr = (pos + i) % replay_esn->replay_window;
nr = bitnr >> 5;
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] &= ~(1U << bitnr);
}
} else {
nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
}
bitnr = (pos + diff) % replay_esn->replay_window;
replay_esn->seq = seq;
} else {
diff = replay_esn->seq - seq;
if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
else
bitnr = replay_esn->replay_window - (diff - pos);
}
nr = bitnr >> 5;
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] |= (1U << bitnr);
if (xfrm_aevent_is_on(xs_net(x)))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
{
struct km_event c;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
/* we send notify messages in case
* 1. we updated one of the sequence numbers, and the seqno difference
* is at least x->replay_maxdiff, in this case we also update the
* timeout of our timer function
* 2. if x->replay_maxage has elapsed since last update,
* and there were changes
*
* The state structure must be locked!
*/
switch (event) {
case XFRM_REPLAY_UPDATE:
if (!x->replay_maxdiff ||
((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
(replay_esn->oseq - preplay_esn->oseq
< x->replay_maxdiff))) {
if (x->xflags & XFRM_TIME_DEFER)
event = XFRM_REPLAY_TIMEOUT;
else
return;
}
break;
case XFRM_REPLAY_TIMEOUT:
if (memcmp(x->replay_esn, x->preplay_esn,
xfrm_replay_state_esn_len(replay_esn)) == 0) {
x->xflags |= XFRM_TIME_DEFER;
return;
}
break;
}
memcpy(x->preplay_esn, x->replay_esn,
xfrm_replay_state_esn_len(replay_esn));
c.event = XFRM_MSG_NEWAE;
c.data.aevent = event;
km_state_notify(x, &c);
if (x->replay_maxage &&
!mod_timer(&x->rtimer, jiffies + x->replay_maxage))
x->xflags &= ~XFRM_TIME_DEFER;
}
static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
{
u32 seq_diff, oseq_diff;
struct km_event c;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
/* we send notify messages in case
* 1. we updated one of the sequence numbers, and the seqno difference
* is at least x->replay_maxdiff, in this case we also update the
* timeout of our timer function
* 2. if x->replay_maxage has elapsed since last update,
* and there were changes
*
* The state structure must be locked!
*/
switch (event) {
case XFRM_REPLAY_UPDATE:
if (x->replay_maxdiff) {
if (replay_esn->seq_hi == preplay_esn->seq_hi)
seq_diff = replay_esn->seq - preplay_esn->seq;
else
seq_diff = ~preplay_esn->seq + replay_esn->seq
+ 1;
if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
oseq_diff = replay_esn->oseq
- preplay_esn->oseq;
else
oseq_diff = ~preplay_esn->oseq
+ replay_esn->oseq + 1;
if (seq_diff >= x->replay_maxdiff ||
oseq_diff >= x->replay_maxdiff)
break;
}
if (x->xflags & XFRM_TIME_DEFER)
event = XFRM_REPLAY_TIMEOUT;
else
return;
break;
case XFRM_REPLAY_TIMEOUT:
if (memcmp(x->replay_esn, x->preplay_esn,
xfrm_replay_state_esn_len(replay_esn)) == 0) {
x->xflags |= XFRM_TIME_DEFER;
return;
}
break;
}
memcpy(x->preplay_esn, x->replay_esn,
xfrm_replay_state_esn_len(replay_esn));
c.event = XFRM_MSG_NEWAE;
c.data.aevent = event;
km_state_notify(x, &c);
if (x->replay_maxage &&
!mod_timer(&x->rtimer, jiffies + x->replay_maxage))
x->xflags &= ~XFRM_TIME_DEFER;
}
static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct net *net = xs_net(x);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
XFRM_SKB_CB(skb)->seq.output.hi = replay_esn->oseq_hi;
if (unlikely(replay_esn->oseq == 0)) {
XFRM_SKB_CB(skb)->seq.output.hi = ++replay_esn->oseq_hi;
if (replay_esn->oseq_hi == 0) {
replay_esn->oseq--;
replay_esn->oseq_hi--;
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
}
}
if (xfrm_aevent_is_on(net))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
static int xfrm_replay_check_esn(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
unsigned int bitnr, nr;
u32 diff;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
u32 pos;
u32 seq = ntohl(net_seq);
u32 wsize = replay_esn->replay_window;
u32 top = replay_esn->seq;
u32 bottom = top - wsize + 1;
if (!wsize)
return 0;
if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
(replay_esn->seq < replay_esn->replay_window - 1)))
goto err;
diff = top - seq;
if (likely(top >= wsize - 1)) {
/* A. same subspace */
if (likely(seq > top) || seq < bottom)
return 0;
} else {
/* B. window spans two subspaces */
if (likely(seq > top && seq < bottom))
return 0;
if (seq >= bottom)
diff = ~seq + top + 1;
}
if (diff >= replay_esn->replay_window) {
x->stats.replay_window++;
goto err;
}
pos = (replay_esn->seq - 1) % replay_esn->replay_window;
if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
else
bitnr = replay_esn->replay_window - (diff - pos);
nr = bitnr >> 5;
bitnr = bitnr & 0x1F;
if (replay_esn->bmp[nr] & (1U << bitnr))
goto err_replay;
return 0;
err_replay:
x->stats.replay++;
err:
xfrm_audit_state_replay(x, skb, net_seq);
return -EINVAL;
}
int xfrm_replay_check(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
switch (x->repl_mode) {
case XFRM_REPLAY_MODE_LEGACY:
break;
case XFRM_REPLAY_MODE_BMP:
return xfrm_replay_check_bmp(x, skb, net_seq);
case XFRM_REPLAY_MODE_ESN:
return xfrm_replay_check_esn(x, skb, net_seq);
}
return xfrm_replay_check_legacy(x, skb, net_seq);
}
static int xfrm_replay_recheck_esn(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
htonl(xfrm_replay_seqhi(x, net_seq)))) {
x->stats.replay_window++;
return -EINVAL;
}
return xfrm_replay_check_esn(x, skb, net_seq);
}
int xfrm_replay_recheck(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
switch (x->repl_mode) {
case XFRM_REPLAY_MODE_LEGACY:
break;
case XFRM_REPLAY_MODE_BMP:
/* no special recheck treatment */
return xfrm_replay_check_bmp(x, skb, net_seq);
case XFRM_REPLAY_MODE_ESN:
return xfrm_replay_recheck_esn(x, skb, net_seq);
}
return xfrm_replay_check_legacy(x, skb, net_seq);
}
static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
{
unsigned int bitnr, nr, i;
int wrap;
u32 diff, pos, seq, seq_hi;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
if (!replay_esn->replay_window)
return;
seq = ntohl(net_seq);
pos = (replay_esn->seq - 1) % replay_esn->replay_window;
seq_hi = xfrm_replay_seqhi(x, net_seq);
wrap = seq_hi - replay_esn->seq_hi;
if ((!wrap && seq > replay_esn->seq) || wrap > 0) {
if (likely(!wrap))
diff = seq - replay_esn->seq;
else
diff = ~replay_esn->seq + seq + 1;
if (diff < replay_esn->replay_window) {
for (i = 1; i < diff; i++) {
bitnr = (pos + i) % replay_esn->replay_window;
nr = bitnr >> 5;
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] &= ~(1U << bitnr);
}
} else {
nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
}
bitnr = (pos + diff) % replay_esn->replay_window;
replay_esn->seq = seq;
if (unlikely(wrap > 0))
replay_esn->seq_hi++;
} else {
diff = replay_esn->seq - seq;
if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
else
bitnr = replay_esn->replay_window - (diff - pos);
}
xfrm_dev_state_advance_esn(x);
nr = bitnr >> 5;
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] |= (1U << bitnr);
if (xfrm_aevent_is_on(xs_net(x)))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
#ifdef CONFIG_XFRM_OFFLOAD
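/* Offload variants of the overflow handlers: for GSO packets one sequence
 * number is reserved per segment so the device can number every resulting
 * packet.
 */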
static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct net *net = xs_net(x);
struct xfrm_offload *xo = xfrm_offload(skb);
__u32 oseq = x->replay.oseq;
if (!xo)
return __xfrm_replay_overflow(x, skb);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
if (!skb_is_gso(skb)) {
XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
xo->seq.low = oseq;
} else {
XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
xo->seq.low = oseq + 1;
oseq += skb_shinfo(skb)->gso_segs;
}
XFRM_SKB_CB(skb)->seq.output.hi = 0;
xo->seq.hi = 0;
if (unlikely(oseq < x->replay.oseq) &&
!(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) {
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
}
x->replay.oseq = oseq;
if (xfrm_aevent_is_on(net))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct xfrm_offload *xo = xfrm_offload(skb);
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct net *net = xs_net(x);
__u32 oseq = replay_esn->oseq;
if (!xo)
return xfrm_replay_overflow_bmp(x, skb);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
if (!skb_is_gso(skb)) {
XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
xo->seq.low = oseq;
} else {
XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
xo->seq.low = oseq + 1;
oseq += skb_shinfo(skb)->gso_segs;
}
XFRM_SKB_CB(skb)->seq.output.hi = 0;
xo->seq.hi = 0;
if (unlikely(oseq < replay_esn->oseq) &&
!(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) {
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
} else {
replay_esn->oseq = oseq;
}
if (xfrm_aevent_is_on(net))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct xfrm_offload *xo = xfrm_offload(skb);
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct net *net = xs_net(x);
__u32 oseq = replay_esn->oseq;
__u32 oseq_hi = replay_esn->oseq_hi;
if (!xo)
return xfrm_replay_overflow_esn(x, skb);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
if (!skb_is_gso(skb)) {
XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
xo->seq.low = oseq;
xo->seq.hi = oseq_hi;
} else {
XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
xo->seq.low = oseq + 1;
xo->seq.hi = oseq_hi;
oseq += skb_shinfo(skb)->gso_segs;
}
if (unlikely(xo->seq.low < replay_esn->oseq)) {
XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
xo->seq.hi = oseq_hi;
replay_esn->oseq_hi = oseq_hi;
if (replay_esn->oseq_hi == 0) {
replay_esn->oseq--;
replay_esn->oseq_hi--;
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
}
}
replay_esn->oseq = oseq;
if (xfrm_aevent_is_on(net))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
{
switch (x->repl_mode) {
case XFRM_REPLAY_MODE_LEGACY:
break;
case XFRM_REPLAY_MODE_BMP:
return xfrm_replay_overflow_offload_bmp(x, skb);
case XFRM_REPLAY_MODE_ESN:
return xfrm_replay_overflow_offload_esn(x, skb);
}
return xfrm_replay_overflow_offload(x, skb);
}
#else
int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
{
switch (x->repl_mode) {
case XFRM_REPLAY_MODE_LEGACY:
break;
case XFRM_REPLAY_MODE_BMP:
return xfrm_replay_overflow_bmp(x, skb);
case XFRM_REPLAY_MODE_ESN:
return xfrm_replay_overflow_esn(x, skb);
}
return __xfrm_replay_overflow(x, skb);
}
#endif
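/* Choose the replay mode for a new state: ESN if XFRM_STATE_ESN is set,
 * bitmap if an extended replay state was supplied without ESN, legacy
 * otherwise.
 */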
int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
if (replay_esn) {
if (replay_esn->replay_window >
replay_esn->bmp_len * sizeof(__u32) * 8) {
NL_SET_ERR_MSG(extack, "ESN replay window is too large for the chosen bitmap size");
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN) {
if (replay_esn->replay_window == 0) {
NL_SET_ERR_MSG(extack, "ESN replay window must be > 0");
return -EINVAL;
}
x->repl_mode = XFRM_REPLAY_MODE_ESN;
} else {
x->repl_mode = XFRM_REPLAY_MODE_BMP;
}
} else {
x->repl_mode = XFRM_REPLAY_MODE_LEGACY;
}
return 0;
}
EXPORT_SYMBOL(xfrm_init_replay);
| linux-master | net/xfrm/xfrm_replay.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/xfrm.h>
static void __net_init __xfrm_sysctl_init(struct net *net)
{
net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME;
net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE;
net->xfrm.sysctl_larval_drop = 1;
net->xfrm.sysctl_acq_expires = 30;
}
#ifdef CONFIG_SYSCTL
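/* Template for the per-namespace sysctls under net.core; the .data pointers
 * are redirected into struct net by xfrm_sysctl_init().
 */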
static struct ctl_table xfrm_table[] = {
{
.procname = "xfrm_aevent_etime",
.maxlen = sizeof(u32),
.mode = 0644,
.proc_handler = proc_douintvec
},
{
.procname = "xfrm_aevent_rseqth",
.maxlen = sizeof(u32),
.mode = 0644,
.proc_handler = proc_douintvec
},
{
.procname = "xfrm_larval_drop",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "xfrm_acq_expires",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{}
};
int __net_init xfrm_sysctl_init(struct net *net)
{
struct ctl_table *table;
size_t table_size = ARRAY_SIZE(xfrm_table);
__xfrm_sysctl_init(net);
table = kmemdup(xfrm_table, sizeof(xfrm_table), GFP_KERNEL);
if (!table)
goto out_kmemdup;
table[0].data = &net->xfrm.sysctl_aevent_etime;
table[1].data = &net->xfrm.sysctl_aevent_rseqth;
table[2].data = &net->xfrm.sysctl_larval_drop;
table[3].data = &net->xfrm.sysctl_acq_expires;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
table[0].procname = NULL;
table_size = 0;
}
net->xfrm.sysctl_hdr = register_net_sysctl_sz(net, "net/core", table,
table_size);
if (!net->xfrm.sysctl_hdr)
goto out_register;
return 0;
out_register:
kfree(table);
out_kmemdup:
return -ENOMEM;
}
void __net_exit xfrm_sysctl_fini(struct net *net)
{
struct ctl_table *table;
table = net->xfrm.sysctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->xfrm.sysctl_hdr);
kfree(table);
}
#else
int __net_init xfrm_sysctl_init(struct net *net)
{
__xfrm_sysctl_init(net);
return 0;
}
#endif
| linux-master | net/xfrm/xfrm_sysctl.c |
// SPDX-License-Identifier: GPL-2.0
/* xfrm_hash.c: Common hash table code.
*
* Copyright (C) 2006 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/xfrm.h>
#include "xfrm_hash.h"
struct hlist_head *xfrm_hash_alloc(unsigned int sz)
{
struct hlist_head *n;
if (sz <= PAGE_SIZE)
n = kzalloc(sz, GFP_KERNEL);
else if (hashdist)
n = vzalloc(sz);
else
n = (struct hlist_head *)
__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
return n;
}
void xfrm_hash_free(struct hlist_head *n, unsigned int sz)
{
if (sz <= PAGE_SIZE)
kfree(n);
else if (hashdist)
vfree(n);
else
free_pages((unsigned long)n, get_order(sz));
}
| linux-master | net/xfrm/xfrm_hash.c |
// SPDX-License-Identifier: GPL-2.0
/*
* XFRM compat layer
* Author: Dmitry Safonov <[email protected]>
* Based on code and translator idea by: Florian Westphal <[email protected]>
*/
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/xfrm.h>
#include <net/xfrm.h>
struct compat_xfrm_lifetime_cfg {
compat_u64 soft_byte_limit, hard_byte_limit;
compat_u64 soft_packet_limit, hard_packet_limit;
compat_u64 soft_add_expires_seconds, hard_add_expires_seconds;
compat_u64 soft_use_expires_seconds, hard_use_expires_seconds;
}; /* same size on 32bit, but only 4 byte alignment required */
struct compat_xfrm_lifetime_cur {
compat_u64 bytes, packets, add_time, use_time;
}; /* same size on 32bit, but only 4 byte alignment required */
struct compat_xfrm_userpolicy_info {
struct xfrm_selector sel;
struct compat_xfrm_lifetime_cfg lft;
struct compat_xfrm_lifetime_cur curlft;
__u32 priority, index;
u8 dir, action, flags, share;
/* 4 bytes additional padding on 64bit */
};
struct compat_xfrm_usersa_info {
struct xfrm_selector sel;
struct xfrm_id id;
xfrm_address_t saddr;
struct compat_xfrm_lifetime_cfg lft;
struct compat_xfrm_lifetime_cur curlft;
struct xfrm_stats stats;
__u32 seq, reqid;
u16 family;
u8 mode, replay_window, flags;
/* 4 bytes additional padding on 64bit */
};
struct compat_xfrm_user_acquire {
struct xfrm_id id;
xfrm_address_t saddr;
struct xfrm_selector sel;
struct compat_xfrm_userpolicy_info policy;
/* 4 bytes additional padding on 64bit */
__u32 aalgos, ealgos, calgos, seq;
};
struct compat_xfrm_userspi_info {
struct compat_xfrm_usersa_info info;
/* 4 bytes additional padding on 64bit */
__u32 min, max;
};
struct compat_xfrm_user_expire {
struct compat_xfrm_usersa_info state;
/* 8 bytes additional padding on 64bit */
u8 hard;
};
struct compat_xfrm_user_polexpire {
struct compat_xfrm_userpolicy_info pol;
/* 8 bytes additional padding on 64bit */
u8 hard;
};
#define XMSGSIZE(type) sizeof(struct type)
static const int compat_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_usersa_info),
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userpolicy_info),
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userspi_info),
[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_acquire),
[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_expire),
[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userpolicy_info),
[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_usersa_info),
[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_polexpire),
[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_NEWSADINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_MAPPING - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_mapping)
};
static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
[XFRMA_SA] = { .len = XMSGSIZE(compat_xfrm_usersa_info)},
[XFRMA_POLICY] = { .len = XMSGSIZE(compat_xfrm_userpolicy_info)},
[XFRMA_LASTUSED] = { .type = NLA_U64},
[XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
[XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
[XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
[XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
[XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
[XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
[XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
[XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
[XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
[XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
[XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
[XFRMA_TFCPAD] = { .type = NLA_U32 },
[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
[XFRMA_PROTO] = { .type = NLA_U8 },
[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
[XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
[XFRMA_SET_MARK] = { .type = NLA_U32 },
[XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
[XFRMA_IF_ID] = { .type = NLA_U32 },
[XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
};
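/* Start a compat (32-bit layout) message in skb, translating the fixed
 * payload of the native message nlh_src.
 */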
static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
const struct nlmsghdr *nlh_src, u16 type)
{
int payload = compat_msg_min[type];
int src_len = xfrm_msg_min[type];
struct nlmsghdr *nlh_dst;
/* Compat messages are shorter than or equal to native ones (+padding) */
if (WARN_ON_ONCE(src_len < payload))
return ERR_PTR(-EMSGSIZE);
nlh_dst = nlmsg_put(skb, nlh_src->nlmsg_pid, nlh_src->nlmsg_seq,
nlh_src->nlmsg_type, payload, nlh_src->nlmsg_flags);
if (!nlh_dst)
return ERR_PTR(-EMSGSIZE);
memset(nlmsg_data(nlh_dst), 0, payload);
switch (nlh_src->nlmsg_type) {
/* Compat message has the same layout as native */
case XFRM_MSG_DELSA:
case XFRM_MSG_DELPOLICY:
case XFRM_MSG_FLUSHSA:
case XFRM_MSG_FLUSHPOLICY:
case XFRM_MSG_NEWAE:
case XFRM_MSG_REPORT:
case XFRM_MSG_MIGRATE:
case XFRM_MSG_NEWSADINFO:
case XFRM_MSG_NEWSPDINFO:
case XFRM_MSG_MAPPING:
WARN_ON_ONCE(src_len != payload);
memcpy(nlmsg_data(nlh_dst), nlmsg_data(nlh_src), src_len);
break;
/* 4 byte alignment for trailing u64 on native, but not on compat */
case XFRM_MSG_NEWSA:
case XFRM_MSG_NEWPOLICY:
case XFRM_MSG_UPDSA:
case XFRM_MSG_UPDPOLICY:
WARN_ON_ONCE(src_len != payload + 4);
memcpy(nlmsg_data(nlh_dst), nlmsg_data(nlh_src), payload);
break;
case XFRM_MSG_EXPIRE: {
const struct xfrm_user_expire *src_ue = nlmsg_data(nlh_src);
struct compat_xfrm_user_expire *dst_ue = nlmsg_data(nlh_dst);
/* compat_xfrm_user_expire has 4-byte smaller state */
memcpy(dst_ue, src_ue, sizeof(dst_ue->state));
dst_ue->hard = src_ue->hard;
break;
}
case XFRM_MSG_ACQUIRE: {
const struct xfrm_user_acquire *src_ua = nlmsg_data(nlh_src);
struct compat_xfrm_user_acquire *dst_ua = nlmsg_data(nlh_dst);
memcpy(dst_ua, src_ua, offsetof(struct compat_xfrm_user_acquire, aalgos));
dst_ua->aalgos = src_ua->aalgos;
dst_ua->ealgos = src_ua->ealgos;
dst_ua->calgos = src_ua->calgos;
dst_ua->seq = src_ua->seq;
break;
}
case XFRM_MSG_POLEXPIRE: {
const struct xfrm_user_polexpire *src_upe = nlmsg_data(nlh_src);
struct compat_xfrm_user_polexpire *dst_upe = nlmsg_data(nlh_dst);
/* compat_xfrm_user_polexpire has 4-byte smaller pol */
memcpy(dst_upe, src_upe, sizeof(dst_upe->pol));
dst_upe->hard = src_upe->hard;
break;
}
case XFRM_MSG_ALLOCSPI: {
const struct xfrm_userspi_info *src_usi = nlmsg_data(nlh_src);
struct compat_xfrm_userspi_info *dst_usi = nlmsg_data(nlh_dst);
/* compat_xfrm_userspi_info has 4-byte smaller info */
memcpy(dst_usi, src_usi, sizeof(src_usi->info));
dst_usi->min = src_usi->min;
dst_usi->max = src_usi->max;
break;
}
/* Not being sent by kernel */
case XFRM_MSG_GETSA:
case XFRM_MSG_GETPOLICY:
case XFRM_MSG_GETAE:
case XFRM_MSG_GETSADINFO:
case XFRM_MSG_GETSPDINFO:
default:
pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
return ERR_PTR(-EOPNOTSUPP);
}
return nlh_dst;
}
static int xfrm_nla_cpy(struct sk_buff *dst, const struct nlattr *src, int len)
{
return nla_put(dst, src->nla_type, len, nla_data(src));
}
static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
{
switch (src->nla_type) {
case XFRMA_PAD:
/* Ignore */
return 0;
case XFRMA_UNSPEC:
case XFRMA_ALG_AUTH:
case XFRMA_ALG_CRYPT:
case XFRMA_ALG_COMP:
case XFRMA_ENCAP:
case XFRMA_TMPL:
return xfrm_nla_cpy(dst, src, nla_len(src));
case XFRMA_SA:
return xfrm_nla_cpy(dst, src, XMSGSIZE(compat_xfrm_usersa_info));
case XFRMA_POLICY:
return xfrm_nla_cpy(dst, src, XMSGSIZE(compat_xfrm_userpolicy_info));
case XFRMA_SEC_CTX:
return xfrm_nla_cpy(dst, src, nla_len(src));
case XFRMA_LTIME_VAL:
return nla_put_64bit(dst, src->nla_type, nla_len(src),
nla_data(src), XFRMA_PAD);
case XFRMA_REPLAY_VAL:
case XFRMA_REPLAY_THRESH:
case XFRMA_ETIMER_THRESH:
case XFRMA_SRCADDR:
case XFRMA_COADDR:
return xfrm_nla_cpy(dst, src, nla_len(src));
case XFRMA_LASTUSED:
return nla_put_64bit(dst, src->nla_type, nla_len(src),
nla_data(src), XFRMA_PAD);
case XFRMA_POLICY_TYPE:
case XFRMA_MIGRATE:
case XFRMA_ALG_AEAD:
case XFRMA_KMADDRESS:
case XFRMA_ALG_AUTH_TRUNC:
case XFRMA_MARK:
case XFRMA_TFCPAD:
case XFRMA_REPLAY_ESN_VAL:
case XFRMA_SA_EXTRA_FLAGS:
case XFRMA_PROTO:
case XFRMA_ADDRESS_FILTER:
case XFRMA_OFFLOAD_DEV:
case XFRMA_SET_MARK:
case XFRMA_SET_MARK_MASK:
case XFRMA_IF_ID:
case XFRMA_MTIMER_THRESH:
return xfrm_nla_cpy(dst, src, nla_len(src));
default:
BUILD_BUG_ON(XFRMA_MAX != XFRMA_MTIMER_THRESH);
pr_warn_once("unsupported nla_type %d\n", src->nla_type);
return -EOPNOTSUPP;
}
}
/* Take kernel-built (64bit layout) and create 32bit layout for userspace */
static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
{
u16 type = nlh_src->nlmsg_type - XFRM_MSG_BASE;
const struct nlattr *nla, *attrs;
struct nlmsghdr *nlh_dst;
int len, remaining;
nlh_dst = xfrm_nlmsg_put_compat(dst, nlh_src, type);
if (IS_ERR(nlh_dst))
return PTR_ERR(nlh_dst);
attrs = nlmsg_attrdata(nlh_src, xfrm_msg_min[type]);
len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]);
nla_for_each_attr(nla, attrs, len, remaining) {
int err;
switch (nlh_src->nlmsg_type) {
case XFRM_MSG_NEWSPDINFO:
err = xfrm_nla_cpy(dst, nla, nla_len(nla));
break;
default:
err = xfrm_xlate64_attr(dst, nla);
break;
}
if (err)
return err;
}
nlmsg_end(dst, nlh_dst);
return 0;
}
static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src)
{
u16 type = nlh_src->nlmsg_type - XFRM_MSG_BASE;
struct sk_buff *new = NULL;
int err;
if (type >= ARRAY_SIZE(xfrm_msg_min)) {
pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
return -EOPNOTSUPP;
}
if (skb_shinfo(skb)->frag_list == NULL) {
new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC);
if (!new)
return -ENOMEM;
skb_shinfo(skb)->frag_list = new;
}
err = xfrm_xlate64(skb_shinfo(skb)->frag_list, nlh_src);
if (err) {
if (new) {
kfree_skb(new);
skb_shinfo(skb)->frag_list = NULL;
}
return err;
}
return 0;
}
/* Calculates len of translated 64-bit message. */
static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
struct nlattr *attrs[XFRMA_MAX + 1],
int maxtype)
{
size_t len = nlmsg_len(src);
switch (src->nlmsg_type) {
case XFRM_MSG_NEWSA:
case XFRM_MSG_NEWPOLICY:
case XFRM_MSG_ALLOCSPI:
case XFRM_MSG_ACQUIRE:
case XFRM_MSG_UPDPOLICY:
case XFRM_MSG_UPDSA:
len += 4;
break;
case XFRM_MSG_EXPIRE:
case XFRM_MSG_POLEXPIRE:
len += 8;
break;
case XFRM_MSG_NEWSPDINFO:
/* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
return len;
default:
break;
}
/* Unexpected for anything but XFRM_MSG_NEWSPDINFO; please
* correct both 64=>32-bit and 32=>64-bit translators to copy
* new attributes.
*/
if (WARN_ON_ONCE(maxtype))
return len;
if (attrs[XFRMA_SA])
len += 4;
if (attrs[XFRMA_POLICY])
len += 4;
/* XXX: some attrs may need to be realigned
* if !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
*/
return len;
}
static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
size_t size, int copy_len, int payload)
{
struct nlmsghdr *nlmsg = dst;
struct nlattr *nla;
/* xfrm_user_rcv_msg_compat() relies on the fact that 32-bit messages
* have the same len as, or are shorter than, 64-bit ones.
* A 32-bit translation that is bigger than the 64-bit original is unexpected.
*/
if (WARN_ON_ONCE(copy_len > payload))
copy_len = payload;
if (size - *pos < nla_attr_size(payload))
return -ENOBUFS;
nla = dst + *pos;
memcpy(nla, src, nla_attr_size(copy_len));
nla->nla_len = nla_attr_size(payload);
*pos += nla_attr_size(copy_len);
nlmsg->nlmsg_len += nla->nla_len;
memset(dst + *pos, 0, payload - copy_len);
*pos += payload - copy_len;
return 0;
}
static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
size_t *pos, size_t size,
struct netlink_ext_ack *extack)
{
int type = nla_type(nla);
u16 pol_len32, pol_len64;
int err;
if (type > XFRMA_MAX) {
BUILD_BUG_ON(XFRMA_MAX != XFRMA_MTIMER_THRESH);
NL_SET_ERR_MSG(extack, "Bad attribute");
return -EOPNOTSUPP;
}
type = array_index_nospec(type, XFRMA_MAX + 1);
if (nla_len(nla) < compat_policy[type].len) {
NL_SET_ERR_MSG(extack, "Attribute bad length");
return -EOPNOTSUPP;
}
pol_len32 = compat_policy[type].len;
pol_len64 = xfrma_policy[type].len;
/* XFRMA_SA and XFRMA_POLICY - need to know how-to translate */
if (pol_len32 != pol_len64) {
if (nla_len(nla) != compat_policy[type].len) {
NL_SET_ERR_MSG(extack, "Attribute bad length");
return -EOPNOTSUPP;
}
err = xfrm_attr_cpy32(dst, pos, nla, size, pol_len32, pol_len64);
if (err)
return err;
}
return xfrm_attr_cpy32(dst, pos, nla, size, nla_len(nla), nla_len(nla));
}
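/* Translate a 32-bit (compat) request into the native 64-bit layout:
 * copy/expand the fixed payload, then append the attributes with their
 * padding fixed up.
 */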
static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
struct nlattr *attrs[XFRMA_MAX+1],
size_t size, u8 type, int maxtype,
struct netlink_ext_ack *extack)
{
size_t pos;
int i;
memcpy(dst, src, NLMSG_HDRLEN);
dst->nlmsg_len = NLMSG_HDRLEN + xfrm_msg_min[type];
memset(nlmsg_data(dst), 0, xfrm_msg_min[type]);
switch (src->nlmsg_type) {
/* Compat message has the same layout as native */
case XFRM_MSG_DELSA:
case XFRM_MSG_GETSA:
case XFRM_MSG_DELPOLICY:
case XFRM_MSG_GETPOLICY:
case XFRM_MSG_FLUSHSA:
case XFRM_MSG_FLUSHPOLICY:
case XFRM_MSG_NEWAE:
case XFRM_MSG_GETAE:
case XFRM_MSG_REPORT:
case XFRM_MSG_MIGRATE:
case XFRM_MSG_NEWSADINFO:
case XFRM_MSG_GETSADINFO:
case XFRM_MSG_NEWSPDINFO:
case XFRM_MSG_GETSPDINFO:
case XFRM_MSG_MAPPING:
memcpy(nlmsg_data(dst), nlmsg_data(src), compat_msg_min[type]);
break;
/* 4 byte alignment for trailing u64 on native, but not on compat */
case XFRM_MSG_NEWSA:
case XFRM_MSG_NEWPOLICY:
case XFRM_MSG_UPDSA:
case XFRM_MSG_UPDPOLICY:
memcpy(nlmsg_data(dst), nlmsg_data(src), compat_msg_min[type]);
break;
case XFRM_MSG_EXPIRE: {
const struct compat_xfrm_user_expire *src_ue = nlmsg_data(src);
struct xfrm_user_expire *dst_ue = nlmsg_data(dst);
/* compat_xfrm_user_expire has 4-byte smaller state */
memcpy(dst_ue, src_ue, sizeof(src_ue->state));
dst_ue->hard = src_ue->hard;
break;
}
case XFRM_MSG_ACQUIRE: {
const struct compat_xfrm_user_acquire *src_ua = nlmsg_data(src);
struct xfrm_user_acquire *dst_ua = nlmsg_data(dst);
memcpy(dst_ua, src_ua, offsetof(struct compat_xfrm_user_acquire, aalgos));
dst_ua->aalgos = src_ua->aalgos;
dst_ua->ealgos = src_ua->ealgos;
dst_ua->calgos = src_ua->calgos;
dst_ua->seq = src_ua->seq;
break;
}
case XFRM_MSG_POLEXPIRE: {
const struct compat_xfrm_user_polexpire *src_upe = nlmsg_data(src);
struct xfrm_user_polexpire *dst_upe = nlmsg_data(dst);
/* compat_xfrm_user_polexpire has 4-byte smaller state */
memcpy(dst_upe, src_upe, sizeof(src_upe->pol));
dst_upe->hard = src_upe->hard;
break;
}
case XFRM_MSG_ALLOCSPI: {
const struct compat_xfrm_userspi_info *src_usi = nlmsg_data(src);
struct xfrm_userspi_info *dst_usi = nlmsg_data(dst);
/* compat_xfrm_userspi_info has 4-byte smaller info */
memcpy(dst_usi, src_usi, sizeof(src_usi->info));
dst_usi->min = src_usi->min;
dst_usi->max = src_usi->max;
break;
}
default:
NL_SET_ERR_MSG(extack, "Unsupported message type");
return -EOPNOTSUPP;
}
pos = dst->nlmsg_len;
if (maxtype) {
/* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
WARN_ON_ONCE(src->nlmsg_type != XFRM_MSG_NEWSPDINFO);
for (i = 1; i <= maxtype; i++) {
int err;
if (!attrs[i])
continue;
/* just copy - no need for translation */
err = xfrm_attr_cpy32(dst, &pos, attrs[i], size,
nla_len(attrs[i]), nla_len(attrs[i]));
if (err)
return err;
}
return 0;
}
for (i = 1; i < XFRMA_MAX + 1; i++) {
int err;
if (i == XFRMA_PAD)
continue;
if (!attrs[i])
continue;
err = xfrm_xlate32_attr(dst, attrs[i], &pos, size, extack);
if (err)
return err;
}
return 0;
}
static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
int maxtype, const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
/* netlink_rcv_skb() checks that the message contains a full (struct nlmsghdr) */
u16 type = h32->nlmsg_type - XFRM_MSG_BASE;
struct nlattr *attrs[XFRMA_MAX+1];
struct nlmsghdr *h64;
size_t len;
int err;
BUILD_BUG_ON(ARRAY_SIZE(xfrm_msg_min) != ARRAY_SIZE(compat_msg_min));
if (type >= ARRAY_SIZE(xfrm_msg_min))
return ERR_PTR(-EINVAL);
/* Don't call parse: the message might contain only the nlmsg header */
if ((h32->nlmsg_type == XFRM_MSG_GETSA ||
h32->nlmsg_type == XFRM_MSG_GETPOLICY) &&
(h32->nlmsg_flags & NLM_F_DUMP))
return NULL;
err = nlmsg_parse_deprecated(h32, compat_msg_min[type], attrs,
maxtype ? : XFRMA_MAX, policy ? : compat_policy, extack);
if (err < 0)
return ERR_PTR(err);
len = xfrm_user_rcv_calculate_len64(h32, attrs, maxtype);
/* The message doesn't need translation */
if (len == nlmsg_len(h32))
return NULL;
len += NLMSG_HDRLEN;
h64 = kvmalloc(len, GFP_KERNEL);
if (!h64)
return ERR_PTR(-ENOMEM);
err = xfrm_xlate32(h64, h32, attrs, len, type, maxtype, extack);
if (err < 0) {
kvfree(h64);
return ERR_PTR(err);
}
return h64;
}
static int xfrm_user_policy_compat(u8 **pdata32, int optlen)
{
struct compat_xfrm_userpolicy_info *p = (void *)*pdata32;
u8 *src_templates, *dst_templates;
u8 *data64;
if (optlen < sizeof(*p))
return -EINVAL;
data64 = kmalloc_track_caller(optlen + 4, GFP_USER | __GFP_NOWARN);
if (!data64)
return -ENOMEM;
memcpy(data64, *pdata32, sizeof(*p));
memset(data64 + sizeof(*p), 0, 4);
src_templates = *pdata32 + sizeof(*p);
dst_templates = data64 + sizeof(*p) + 4;
memcpy(dst_templates, src_templates, optlen - sizeof(*p));
kfree(*pdata32);
*pdata32 = data64;
return 0;
}
static struct xfrm_translator xfrm_translator = {
.owner = THIS_MODULE,
.alloc_compat = xfrm_alloc_compat,
.rcv_msg_compat = xfrm_user_rcv_msg_compat,
.xlate_user_policy_sockptr = xfrm_user_policy_compat,
};
static int __init xfrm_compat_init(void)
{
return xfrm_register_translator(&xfrm_translator);
}
static void __exit xfrm_compat_exit(void)
{
xfrm_unregister_translator(&xfrm_translator);
}
module_init(xfrm_compat_init);
module_exit(xfrm_compat_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dmitry Safonov");
MODULE_DESCRIPTION("XFRM 32-bit compatibility layer");
| linux-master | net/xfrm/xfrm_compat.c |
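The attribute translation above hinges on one small invariant: a 32-bit attribute payload is copied verbatim, nla_len is bumped to the size of the 64-bit payload, and the missing tail is zero-filled. The following standalone userspace sketch illustrates that scheme outside the kernel; the toy_* names and the simplified 4-byte attribute header are assumptions made for the illustration, not kernel API.
/* toy_attr_pad.c - standalone userspace sketch (not kernel code) of the
 * copy-then-zero-pad scheme used by xfrm_attr_cpy32() above: the shorter
 * 32-bit payload is copied verbatim, nla_len advertises the full 64-bit
 * payload, and the missing tail bytes are cleared.
 * Build: cc -o toy_attr_pad toy_attr_pad.c
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_NLA_HDRLEN 4    /* simplified attribute header size */

struct toy_nlattr {
    uint16_t nla_len;
    uint16_t nla_type;
};

/* Copy @copy_len payload bytes of @src into @dst at @pos, but account for
 * a @payload-byte native attribute; the difference is zero-filled.
 * Returns the new write position.
 */
static size_t toy_attr_cpy32(void *dst, size_t pos, const void *src,
                             int copy_len, int payload)
{
    struct toy_nlattr *nla = (struct toy_nlattr *)((char *)dst + pos);

    memcpy(nla, src, TOY_NLA_HDRLEN + copy_len);
    nla->nla_len = TOY_NLA_HDRLEN + payload;
    memset((char *)nla + TOY_NLA_HDRLEN + copy_len, 0, payload - copy_len);
    return pos + TOY_NLA_HDRLEN + payload;
}

int main(void)
{
    /* a compat attribute with an 8-byte payload, translated to 12 bytes */
    unsigned char src[TOY_NLA_HDRLEN + 8] = { 12, 0, 6, 0,
                                              1, 2, 3, 4, 5, 6, 7, 8 };
    unsigned char dst[64] = { 0 };
    size_t pos = toy_attr_cpy32(dst, 0, src, 8, 12);

    printf("consumed %zu bytes, nla_len=%u\n",
           pos, (unsigned)((struct toy_nlattr *)dst)->nla_len);
    return 0;
}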
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IP Payload Compression Protocol (IPComp) - RFC3173.
*
* Copyright (c) 2003 James Morris <[email protected]>
* Copyright (c) 2003-2008 Herbert Xu <[email protected]>
*
* Todo:
* - Tunable compression parameters.
* - Compression stats.
* - Adaptive compression.
*/
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/ipcomp.h>
#include <net/xfrm.h>
struct ipcomp_tfms {
struct list_head list;
struct crypto_comp * __percpu *tfms;
int users;
};
static DEFINE_MUTEX(ipcomp_resource_mutex);
static void * __percpu *ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipcomp_data *ipcd = x->data;
const int plen = skb->len;
int dlen = IPCOMP_SCRATCH_SIZE;
const u8 *start = skb->data;
u8 *scratch = *this_cpu_ptr(ipcomp_scratches);
struct crypto_comp *tfm = *this_cpu_ptr(ipcd->tfms);
int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
int len;
if (err)
return err;
if (dlen < (plen + sizeof(struct ip_comp_hdr)))
return -EINVAL;
len = dlen - plen;
if (len > skb_tailroom(skb))
len = skb_tailroom(skb);
__skb_put(skb, len);
len += plen;
skb_copy_to_linear_data(skb, scratch, len);
while ((scratch += len, dlen -= len) > 0) {
skb_frag_t *frag;
struct page *page;
if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
return -EMSGSIZE;
frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
page = alloc_page(GFP_ATOMIC);
if (!page)
return -ENOMEM;
len = PAGE_SIZE;
if (dlen < len)
len = dlen;
skb_frag_fill_page_desc(frag, page, 0, len);
memcpy(skb_frag_address(frag), scratch, len);
skb->truesize += len;
skb->data_len += len;
skb->len += len;
skb_shinfo(skb)->nr_frags++;
}
return 0;
}
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
int nexthdr;
int err = -ENOMEM;
struct ip_comp_hdr *ipch;
if (skb_linearize_cow(skb))
goto out;
skb->ip_summed = CHECKSUM_NONE;
/* Remove ipcomp header and decompress original payload */
ipch = (void *)skb->data;
nexthdr = ipch->nexthdr;
skb->transport_header = skb->network_header + sizeof(*ipch);
__skb_pull(skb, sizeof(*ipch));
err = ipcomp_decompress(x, skb);
if (err)
goto out;
err = nexthdr;
out:
return err;
}
EXPORT_SYMBOL_GPL(ipcomp_input);
static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipcomp_data *ipcd = x->data;
const int plen = skb->len;
int dlen = IPCOMP_SCRATCH_SIZE;
u8 *start = skb->data;
struct crypto_comp *tfm;
u8 *scratch;
int err;
local_bh_disable();
scratch = *this_cpu_ptr(ipcomp_scratches);
tfm = *this_cpu_ptr(ipcd->tfms);
err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
if (err)
goto out;
if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
err = -EMSGSIZE;
goto out;
}
memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
local_bh_enable();
pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
return 0;
out:
local_bh_enable();
return err;
}
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
struct ip_comp_hdr *ipch;
struct ipcomp_data *ipcd = x->data;
if (skb->len < ipcd->threshold) {
/* Don't bother compressing */
goto out_ok;
}
if (skb_linearize_cow(skb))
goto out_ok;
err = ipcomp_compress(x, skb);
if (err) {
goto out_ok;
}
/* Install ipcomp header, convert into ipcomp datagram. */
ipch = ip_comp_hdr(skb);
ipch->nexthdr = *skb_mac_header(skb);
ipch->flags = 0;
ipch->cpi = htons((u16)ntohl(x->id.spi));
*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
skb_push(skb, -skb_network_offset(skb));
return 0;
}
EXPORT_SYMBOL_GPL(ipcomp_output);
static void ipcomp_free_scratches(void)
{
int i;
void * __percpu *scratches;
if (--ipcomp_scratch_users)
return;
scratches = ipcomp_scratches;
if (!scratches)
return;
for_each_possible_cpu(i)
vfree(*per_cpu_ptr(scratches, i));
free_percpu(scratches);
ipcomp_scratches = NULL;
}
static void * __percpu *ipcomp_alloc_scratches(void)
{
void * __percpu *scratches;
int i;
if (ipcomp_scratch_users++)
return ipcomp_scratches;
scratches = alloc_percpu(void *);
if (!scratches)
return NULL;
ipcomp_scratches = scratches;
for_each_possible_cpu(i) {
void *scratch;
scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
if (!scratch)
return NULL;
*per_cpu_ptr(scratches, i) = scratch;
}
return scratches;
}
static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
{
struct ipcomp_tfms *pos;
int cpu;
list_for_each_entry(pos, &ipcomp_tfms_list, list) {
if (pos->tfms == tfms)
break;
}
WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));
if (--pos->users)
return;
list_del(&pos->list);
kfree(pos);
if (!tfms)
return;
for_each_possible_cpu(cpu) {
struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
crypto_free_comp(tfm);
}
free_percpu(tfms);
}
static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
{
struct ipcomp_tfms *pos;
struct crypto_comp * __percpu *tfms;
int cpu;
list_for_each_entry(pos, &ipcomp_tfms_list, list) {
struct crypto_comp *tfm;
/* This can be any valid CPU ID so we don't need locking. */
tfm = this_cpu_read(*pos->tfms);
if (!strcmp(crypto_comp_name(tfm), alg_name)) {
pos->users++;
return pos->tfms;
}
}
pos = kmalloc(sizeof(*pos), GFP_KERNEL);
if (!pos)
return NULL;
pos->users = 1;
INIT_LIST_HEAD(&pos->list);
list_add(&pos->list, &ipcomp_tfms_list);
pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
if (!tfms)
goto error;
for_each_possible_cpu(cpu) {
struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto error;
*per_cpu_ptr(tfms, cpu) = tfm;
}
return tfms;
error:
ipcomp_free_tfms(tfms);
return NULL;
}
static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
if (ipcd->tfms)
ipcomp_free_tfms(ipcd->tfms);
ipcomp_free_scratches();
}
void ipcomp_destroy(struct xfrm_state *x)
{
struct ipcomp_data *ipcd = x->data;
if (!ipcd)
return;
xfrm_state_delete_tunnel(x);
mutex_lock(&ipcomp_resource_mutex);
ipcomp_free_data(ipcd);
mutex_unlock(&ipcomp_resource_mutex);
kfree(ipcd);
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);
int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
int err;
struct ipcomp_data *ipcd;
struct xfrm_algo_desc *calg_desc;
err = -EINVAL;
if (!x->calg) {
NL_SET_ERR_MSG(extack, "Missing required compression algorithm");
goto out;
}
if (x->encap) {
NL_SET_ERR_MSG(extack, "IPComp is not compatible with encapsulation");
goto out;
}
err = -ENOMEM;
ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
if (!ipcd)
goto out;
mutex_lock(&ipcomp_resource_mutex);
if (!ipcomp_alloc_scratches())
goto error;
ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
if (!ipcd->tfms)
goto error;
mutex_unlock(&ipcomp_resource_mutex);
calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
BUG_ON(!calg_desc);
ipcd->threshold = calg_desc->uinfo.comp.threshold;
x->data = ipcd;
err = 0;
out:
return err;
error:
ipcomp_free_data(ipcd);
mutex_unlock(&ipcomp_resource_mutex);
kfree(ipcd);
goto out;
}
EXPORT_SYMBOL_GPL(ipcomp_init_state);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
MODULE_AUTHOR("James Morris <[email protected]>");
| linux-master | net/xfrm/xfrm_ipcomp.c |
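Two small decisions in ipcomp_output()/ipcomp_compress() are easy to miss: the CPI carried in the IPComp header is just the low 16 bits of the SPI, and a packet is only sent compressed when the result, including the 4-byte IPComp header, is actually smaller than the original and the packet met the algorithm's size threshold in the first place. The standalone sketch below reproduces both checks; the toy_* names are chosen for the illustration only.
/* toy_ipcomp.c - standalone userspace sketch (not kernel code) of two
 * decisions made by ipcomp_output()/ipcomp_compress() above: the CPI is
 * the low 16 bits of the SPI, and the compressed payload is only used
 * when it is smaller than the original once the 4-byte IPComp header is
 * accounted for and the packet met the algorithm's threshold.
 * Build: cc -o toy_ipcomp toy_ipcomp.c
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_IPCOMP_HDRLEN 4    /* sizeof(struct ip_comp_hdr) */

/* Mirror of "ipch->cpi = htons((u16)ntohl(x->id.spi))". */
static uint16_t toy_cpi_from_spi(uint32_t spi_be)
{
    return htons((uint16_t)ntohl(spi_be));
}

/* Return 1 when a @plen-byte packet that compressed to @dlen bytes should
 * go out as IPComp, 0 when it should be sent uncompressed.
 */
static int toy_should_compress(int plen, int dlen, int threshold)
{
    if (plen < threshold)
        return 0;                               /* not worth trying */
    return dlen + TOY_IPCOMP_HDRLEN < plen;     /* must actually shrink */
}

int main(void)
{
    uint32_t spi = htonl(0x12345678);

    printf("cpi=0x%04x\n", (unsigned)ntohs(toy_cpi_from_spi(spi)));
    printf("1400->900: %d  1400->1399: %d  64->30: %d\n",
           toy_should_compress(1400, 900, 90),
           toy_should_compress(1400, 1399, 90),
           toy_should_compress(64, 30, 90));
    return 0;
}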
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* xfrm_proc.c
*
* Copyright (C)2006-2007 USAGI/WIDE Project
*
* Authors: Masahide NAKAMURA <[email protected]>
*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <net/snmp.h>
#include <net/xfrm.h>
static const struct snmp_mib xfrm_mib_list[] = {
SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR),
SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR),
SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR),
SNMP_MIB_ITEM("XfrmInNoStates", LINUX_MIB_XFRMINNOSTATES),
SNMP_MIB_ITEM("XfrmInStateProtoError", LINUX_MIB_XFRMINSTATEPROTOERROR),
SNMP_MIB_ITEM("XfrmInStateModeError", LINUX_MIB_XFRMINSTATEMODEERROR),
SNMP_MIB_ITEM("XfrmInStateSeqError", LINUX_MIB_XFRMINSTATESEQERROR),
SNMP_MIB_ITEM("XfrmInStateExpired", LINUX_MIB_XFRMINSTATEEXPIRED),
SNMP_MIB_ITEM("XfrmInStateMismatch", LINUX_MIB_XFRMINSTATEMISMATCH),
SNMP_MIB_ITEM("XfrmInStateInvalid", LINUX_MIB_XFRMINSTATEINVALID),
SNMP_MIB_ITEM("XfrmInTmplMismatch", LINUX_MIB_XFRMINTMPLMISMATCH),
SNMP_MIB_ITEM("XfrmInNoPols", LINUX_MIB_XFRMINNOPOLS),
SNMP_MIB_ITEM("XfrmInPolBlock", LINUX_MIB_XFRMINPOLBLOCK),
SNMP_MIB_ITEM("XfrmInPolError", LINUX_MIB_XFRMINPOLERROR),
SNMP_MIB_ITEM("XfrmOutError", LINUX_MIB_XFRMOUTERROR),
SNMP_MIB_ITEM("XfrmOutBundleGenError", LINUX_MIB_XFRMOUTBUNDLEGENERROR),
SNMP_MIB_ITEM("XfrmOutBundleCheckError", LINUX_MIB_XFRMOUTBUNDLECHECKERROR),
SNMP_MIB_ITEM("XfrmOutNoStates", LINUX_MIB_XFRMOUTNOSTATES),
SNMP_MIB_ITEM("XfrmOutStateProtoError", LINUX_MIB_XFRMOUTSTATEPROTOERROR),
SNMP_MIB_ITEM("XfrmOutStateModeError", LINUX_MIB_XFRMOUTSTATEMODEERROR),
SNMP_MIB_ITEM("XfrmOutStateSeqError", LINUX_MIB_XFRMOUTSTATESEQERROR),
SNMP_MIB_ITEM("XfrmOutStateExpired", LINUX_MIB_XFRMOUTSTATEEXPIRED),
SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK),
SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
SNMP_MIB_ITEM("XfrmOutStateInvalid", LINUX_MIB_XFRMOUTSTATEINVALID),
SNMP_MIB_ITEM("XfrmAcquireError", LINUX_MIB_XFRMACQUIREERROR),
SNMP_MIB_SENTINEL
};
static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
{
unsigned long buff[LINUX_MIB_XFRMMAX];
struct net *net = seq->private;
int i;
memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
snmp_get_cpu_field_batch(buff, xfrm_mib_list,
net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++)
seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
buff[i]);
return 0;
}
int __net_init xfrm_proc_init(struct net *net)
{
if (!proc_create_net_single("xfrm_stat", 0444, net->proc_net,
xfrm_statistics_seq_show, NULL))
return -ENOMEM;
return 0;
}
void xfrm_proc_fini(struct net *net)
{
remove_proc_entry("xfrm_stat", net->proc_net);
}
| linux-master | net/xfrm/xfrm_proc.c |
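The seq_file handler above emits one "<name>\t<value>" line per MIB counter into /proc/net/xfrm_stat. A small userspace reader only needs fscanf to consume that format; the program name and the non-zero filter below are arbitrary choices made for this sketch.
/* xfrm_stat_dump.c - small userspace reader (not kernel code) for the
 * /proc/net/xfrm_stat file produced by xfrm_statistics_seq_show() above.
 * Each line is "<name>\t<value>"; this tool prints only non-zero counters.
 * Build: cc -o xfrm_stat_dump xfrm_stat_dump.c
 */
#include <stdio.h>

int main(void)
{
    char name[64];
    unsigned long val;
    FILE *f = fopen("/proc/net/xfrm_stat", "r");

    if (!f) {
        perror("/proc/net/xfrm_stat");
        return 1;
    }
    while (fscanf(f, "%63s %lu", name, &val) == 2) {
        if (val)
            printf("%s = %lu\n", name, val);
    }
    fclose(f);
    return 0;
}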
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* xfrm_output.c - Common IPsec encapsulation code.
*
* Copyright (c) 2007 Herbert Xu <[email protected]>
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/gso.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
#endif
#include "xfrm_inout.h"
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
static int xfrm_skb_check_space(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
- skb_headroom(skb);
int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
if (nhead <= 0) {
if (ntail <= 0)
return 0;
nhead = 0;
} else if (ntail < 0)
ntail = 0;
return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}
/* Children define the path of the packet through the
* Linux networking stack. Thus, destinations are stackable.
*/
static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));
skb_dst_drop(skb);
return child;
}
/* Add encapsulation header.
*
* The IP header will be moved forward to make space for the encapsulation
* header.
*/
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
int ihl = iph->ihl * 4;
skb_set_inner_transport_header(skb, skb_transport_offset(skb));
skb_set_network_header(skb, -x->props.header_len);
skb->mac_header = skb->network_header +
offsetof(struct iphdr, protocol);
skb->transport_header = skb->network_header + ihl;
__skb_pull(skb, ihl);
memmove(skb_network_header(skb), iph, ihl);
return 0;
}
#if IS_ENABLED(CONFIG_IPV6_MIP6)
static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type)
{
const unsigned char *nh = skb_network_header(skb);
unsigned int offset = sizeof(struct ipv6hdr);
unsigned int packet_len;
int found_rhdr = 0;
packet_len = skb_tail_pointer(skb) - nh;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
switch (**nexthdr) {
case NEXTHDR_HOP:
break;
case NEXTHDR_ROUTING:
if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) {
struct ipv6_rt_hdr *rt;
rt = (struct ipv6_rt_hdr *)(nh + offset);
if (rt->type != 0)
return offset;
}
found_rhdr = 1;
break;
case NEXTHDR_DEST:
/* HAO MUST NOT appear more than once.
* XXX: It would be better to scan to the end of the
* XXX: packet to check whether a HAO exists.
*/
if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
net_dbg_ratelimited("mip6: hao exists already, override\n");
return offset;
}
if (found_rhdr)
return offset;
break;
default:
return offset;
}
if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
return -EINVAL;
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
offset += ipv6_optlen(exthdr);
if (offset > IPV6_MAXPLEN)
return -EINVAL;
*nexthdr = &exthdr->nexthdr;
}
return -EINVAL;
}
#endif
#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
switch (x->type->proto) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
return mip6_rthdr_offset(skb, prevhdr, x->type->proto);
#endif
default:
break;
}
return ip6_find_1stfragopt(skb, prevhdr);
}
#endif
/* Add encapsulation header.
*
* The IP header and mutable extension headers will be moved forward to make
* space for the encapsulation header.
*/
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *iph;
u8 *prevhdr;
int hdr_len;
iph = ipv6_hdr(skb);
skb_set_inner_transport_header(skb, skb_transport_offset(skb));
hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
if (hdr_len < 0)
return hdr_len;
skb_set_mac_header(skb,
(prevhdr - x->props.header_len) - skb->data);
skb_set_network_header(skb, -x->props.header_len);
skb->transport_header = skb->network_header + hdr_len;
__skb_pull(skb, hdr_len);
memmove(ipv6_hdr(skb), iph, hdr_len);
return 0;
#else
WARN_ON_ONCE(1);
return -EAFNOSUPPORT;
#endif
}
/* Add route optimization header space.
*
* The IP header and mutable extension headers will be moved forward to make
* space for the route optimization header.
*/
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *iph;
u8 *prevhdr;
int hdr_len;
iph = ipv6_hdr(skb);
hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
if (hdr_len < 0)
return hdr_len;
skb_set_mac_header(skb,
(prevhdr - x->props.header_len) - skb->data);
skb_set_network_header(skb, -x->props.header_len);
skb->transport_header = skb->network_header + hdr_len;
__skb_pull(skb, hdr_len);
memmove(ipv6_hdr(skb), iph, hdr_len);
return 0;
#else
WARN_ON_ONCE(1);
return -EAFNOSUPPORT;
#endif
}
/* Add encapsulation header.
*
* The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
*/
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
struct ip_beet_phdr *ph;
struct iphdr *top_iph;
int hdrlen, optlen;
hdrlen = 0;
optlen = XFRM_MODE_SKB_CB(skb)->optlen;
if (unlikely(optlen))
hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);
skb_set_network_header(skb, -x->props.header_len - hdrlen +
(XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
if (x->sel.family != AF_INET6)
skb->network_header += IPV4_BEET_PHMAXLEN;
skb->mac_header = skb->network_header +
offsetof(struct iphdr, protocol);
skb->transport_header = skb->network_header + sizeof(*top_iph);
xfrm4_beet_make_header(skb);
ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);
top_iph = ip_hdr(skb);
if (unlikely(optlen)) {
if (WARN_ON(optlen < 0))
return -EINVAL;
ph->padlen = 4 - (optlen & 4);
ph->hdrlen = optlen / 8;
ph->nexthdr = top_iph->protocol;
if (ph->padlen)
memset(ph + 1, IPOPT_NOP, ph->padlen);
top_iph->protocol = IPPROTO_BEETPH;
top_iph->ihl = sizeof(struct iphdr) / 4;
}
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
return 0;
}
/* Add encapsulation header.
*
* The top IP header will be constructed per RFC 2401.
*/
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
struct dst_entry *dst = skb_dst(skb);
struct iphdr *top_iph;
int flags;
skb_set_inner_network_header(skb, skb_network_offset(skb));
skb_set_inner_transport_header(skb, skb_transport_offset(skb));
skb_set_network_header(skb, -x->props.header_len);
skb->mac_header = skb->network_header +
offsetof(struct iphdr, protocol);
skb->transport_header = skb->network_header + sizeof(*top_iph);
top_iph = ip_hdr(skb);
top_iph->ihl = 5;
top_iph->version = 4;
top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);
/* Whether the inner DSCP is copied to the outer header depends on XFRM_SA_XFLAG_DONT_ENCAP_DSCP */
if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
top_iph->tos = 0;
else
top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
XFRM_MODE_SKB_CB(skb)->tos);
flags = x->props.flags;
if (flags & XFRM_STATE_NOECN)
IP_ECN_clear(top_iph);
top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
ip_select_ident(dev_net(dst->dev), skb, NULL);
return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *top_iph;
int dsfield;
skb_set_inner_network_header(skb, skb_network_offset(skb));
skb_set_inner_transport_header(skb, skb_transport_offset(skb));
skb_set_network_header(skb, -x->props.header_len);
skb->mac_header = skb->network_header +
offsetof(struct ipv6hdr, nexthdr);
skb->transport_header = skb->network_header + sizeof(*top_iph);
top_iph = ipv6_hdr(skb);
top_iph->version = 6;
memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
sizeof(top_iph->flow_lbl));
top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);
if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
dsfield = 0;
else
dsfield = XFRM_MODE_SKB_CB(skb)->tos;
dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
if (x->props.flags & XFRM_STATE_NOECN)
dsfield &= ~INET_ECN_MASK;
ipv6_change_dsfield(top_iph, 0, dsfield);
top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
return 0;
}
static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipv6hdr *top_iph;
struct ip_beet_phdr *ph;
int optlen, hdr_len;
hdr_len = 0;
optlen = XFRM_MODE_SKB_CB(skb)->optlen;
if (unlikely(optlen))
hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);
skb_set_network_header(skb, -x->props.header_len - hdr_len);
if (x->sel.family != AF_INET6)
skb->network_header += IPV4_BEET_PHMAXLEN;
skb->mac_header = skb->network_header +
offsetof(struct ipv6hdr, nexthdr);
skb->transport_header = skb->network_header + sizeof(*top_iph);
ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);
xfrm6_beet_make_header(skb);
top_iph = ipv6_hdr(skb);
if (unlikely(optlen)) {
if (WARN_ON(optlen < 0))
return -EINVAL;
ph->padlen = 4 - (optlen & 4);
ph->hdrlen = optlen / 8;
ph->nexthdr = top_iph->nexthdr;
if (ph->padlen)
memset(ph + 1, IPOPT_NOP, ph->padlen);
top_iph->nexthdr = IPPROTO_BEETPH;
}
top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
return 0;
}
#endif
/* Add encapsulation header.
*
* On exit, the transport header will be set to the start of the
* encapsulation header to be filled in by x->type->output and the mac
* header will be set to the nextheader (protocol for IPv4) field of the
* extension header directly preceding the encapsulation header, or in
* its absence, that of the top IP header.
* The value of the network header will always point to the top IP header
* while skb->data will point to the payload.
*/
static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
err = xfrm_inner_extract_output(x, skb);
if (err)
return err;
IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
skb->protocol = htons(ETH_P_IP);
switch (x->props.mode) {
case XFRM_MODE_BEET:
return xfrm4_beet_encap_add(x, skb);
case XFRM_MODE_TUNNEL:
return xfrm4_tunnel_encap_add(x, skb);
}
WARN_ON_ONCE(1);
return -EOPNOTSUPP;
}
static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
int err;
err = xfrm_inner_extract_output(x, skb);
if (err)
return err;
skb->ignore_df = 1;
skb->protocol = htons(ETH_P_IPV6);
switch (x->props.mode) {
case XFRM_MODE_BEET:
return xfrm6_beet_encap_add(x, skb);
case XFRM_MODE_TUNNEL:
return xfrm6_tunnel_encap_add(x, skb);
default:
WARN_ON_ONCE(1);
return -EOPNOTSUPP;
}
#endif
WARN_ON_ONCE(1);
return -EAFNOSUPPORT;
}
static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
switch (x->props.mode) {
case XFRM_MODE_BEET:
case XFRM_MODE_TUNNEL:
if (x->props.family == AF_INET)
return xfrm4_prepare_output(x, skb);
if (x->props.family == AF_INET6)
return xfrm6_prepare_output(x, skb);
break;
case XFRM_MODE_TRANSPORT:
if (x->props.family == AF_INET)
return xfrm4_transport_output(x, skb);
if (x->props.family == AF_INET6)
return xfrm6_transport_output(x, skb);
break;
case XFRM_MODE_ROUTEOPTIMIZATION:
if (x->props.family == AF_INET6)
return xfrm6_ro_output(x, skb);
WARN_ON_ONCE(1);
break;
default:
WARN_ON_ONCE(1);
break;
}
return -EOPNOTSUPP;
}
#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
#endif
static int xfrm_output_one(struct sk_buff *skb, int err)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
struct net *net = xs_net(x);
if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
goto resume;
do {
err = xfrm_skb_check_space(skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
goto error_nolock;
}
skb->mark = xfrm_smark_get(skb->mark, x);
err = xfrm_outer_mode_output(x, skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
goto error_nolock;
}
spin_lock_bh(&x->lock);
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
err = -EINVAL;
goto error;
}
err = xfrm_state_check_expire(x);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
goto error;
}
err = xfrm_replay_overflow(x, skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
goto error;
}
x->curlft.bytes += skb->len;
x->curlft.packets++;
x->lastused = ktime_get_real_seconds();
spin_unlock_bh(&x->lock);
skb_dst_force(skb);
if (!skb_dst(skb)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
err = -EHOSTUNREACH;
goto error_nolock;
}
if (xfrm_offload(skb)) {
x->type_offload->encap(x, skb);
} else {
/* Inner headers are invalid now. */
skb->encapsulation = 0;
err = x->type->output(x, skb);
if (err == -EINPROGRESS)
goto out;
}
resume:
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
goto error_nolock;
}
dst = skb_dst_pop(skb);
if (!dst) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
err = -EHOSTUNREACH;
goto error_nolock;
}
skb_dst_set(skb, dst);
x = dst->xfrm;
} while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));
return 0;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(skb);
out:
return err;
}
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
{
struct net *net = xs_net(skb_dst(skb)->xfrm);
while (likely((err = xfrm_output_one(skb, err)) == 0)) {
nf_reset_ct(skb);
err = skb_dst(skb)->ops->local_out(net, sk, skb);
if (unlikely(err != 1))
goto out;
if (!skb_dst(skb)->xfrm)
return dst_output(net, sk, skb);
err = nf_hook(skb_dst(skb)->ops->family,
NF_INET_POST_ROUTING, net, sk, skb,
NULL, skb_dst(skb)->dev, xfrm_output2);
if (unlikely(err != 1))
goto out;
}
if (err == -EINPROGRESS)
err = 0;
out:
return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return xfrm_output_resume(sk, skb, 1);
}
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct sk_buff *segs, *nskb;
BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
segs = skb_gso_segment(skb, 0);
kfree_skb(skb);
if (IS_ERR(segs))
return PTR_ERR(segs);
if (segs == NULL)
return -EINVAL;
skb_list_walk_safe(segs, segs, nskb) {
int err;
skb_mark_not_on_list(segs);
err = xfrm_output2(net, sk, segs);
if (unlikely(err)) {
kfree_skb_list(nskb);
return err;
}
}
return 0;
}
/* For partial checksum offload, the outer header checksum is calculated
* by software and the inner header checksum is calculated by hardware.
* This requires hardware to know the inner packet type to calculate
* the inner header checksum. Save the inner IP protocol here to avoid
* traversing the packet in the vendor's xmit code.
* For IPsec tunnel mode, save the IP protocol from the IP header of the
* plain-text packet. Otherwise, if the encap type is IPIP, just save
* skb->inner_ipproto; in any other case get the IP protocol from the IP
* header.
*/
static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
{
struct xfrm_offload *xo = xfrm_offload(skb);
const struct ethhdr *eth;
if (!xo)
return;
if (x->outer_mode.encap == XFRM_MODE_TUNNEL) {
switch (x->outer_mode.family) {
case AF_INET:
xo->inner_ipproto = ip_hdr(skb)->protocol;
break;
case AF_INET6:
xo->inner_ipproto = ipv6_hdr(skb)->nexthdr;
break;
default:
break;
}
return;
}
/* non-Tunnel Mode */
if (!skb->encapsulation)
return;
if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
xo->inner_ipproto = skb->inner_ipproto;
return;
}
if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
return;
eth = (struct ethhdr *)skb_inner_mac_header(skb);
switch (ntohs(eth->h_proto)) {
case ETH_P_IPV6:
xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
break;
case ETH_P_IP:
xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
break;
}
}
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb_dst(skb)->dev);
struct xfrm_state *x = skb_dst(skb)->xfrm;
int err;
switch (x->outer_mode.family) {
case AF_INET:
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
break;
case AF_INET6:
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
break;
}
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
if (!xfrm_dev_offload_ok(skb, x)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
kfree_skb(skb);
return -EHOSTUNREACH;
}
return xfrm_output_resume(sk, skb, 0);
}
secpath_reset(skb);
if (xfrm_dev_offload_ok(skb, x)) {
struct sec_path *sp;
sp = secpath_set(skb);
if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
kfree_skb(skb);
return -ENOMEM;
}
sp->olen++;
sp->xvec[sp->len++] = x;
xfrm_state_hold(x);
xfrm_get_inner_ipproto(skb, x);
skb->encapsulation = 1;
if (skb_is_gso(skb)) {
if (skb->inner_protocol)
return xfrm_output_gso(net, sk, skb);
skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
goto out;
}
if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
goto out;
} else {
if (skb_is_gso(skb))
return xfrm_output_gso(net, sk, skb);
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
err = skb_checksum_help(skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
kfree_skb(skb);
return err;
}
}
out:
return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
goto out;
if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
goto out;
mtu = dst_mtu(skb_dst(skb));
if ((!skb_is_gso(skb) && skb->len > mtu) ||
(skb_is_gso(skb) &&
!skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
skb->protocol = htons(ETH_P_IP);
if (skb->sk)
xfrm_local_error(skb, mtu);
else
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_FRAG_NEEDED, htonl(mtu));
ret = -EMSGSIZE;
}
out:
return ret;
}
static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
if (x->outer_mode.encap == XFRM_MODE_BEET &&
ip_is_fragment(ip_hdr(skb))) {
net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
return -EAFNOSUPPORT;
}
err = xfrm4_tunnel_check_size(skb);
if (err)
return err;
XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;
xfrm4_extract_header(skb);
return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
struct dst_entry *dst = skb_dst(skb);
if (skb->ignore_df)
goto out;
mtu = dst_mtu(dst);
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if ((!skb_is_gso(skb) && skb->len > mtu) ||
(skb_is_gso(skb) &&
!skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
skb->dev = dst->dev;
skb->protocol = htons(ETH_P_IPV6);
if (xfrm6_local_dontfrag(skb->sk))
ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
else if (skb->sk)
xfrm_local_error(skb, mtu);
else
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE;
}
out:
return ret;
}
#endif
static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
int err;
err = xfrm6_tunnel_check_size(skb);
if (err)
return err;
XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;
xfrm6_extract_header(skb);
return 0;
#else
WARN_ON_ONCE(1);
return -EAFNOSUPPORT;
#endif
}
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
return xfrm4_extract_output(x, skb);
case htons(ETH_P_IPV6):
return xfrm6_extract_output(x, skb);
}
return -EAFNOSUPPORT;
}
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
unsigned int proto;
struct xfrm_state_afinfo *afinfo;
if (skb->protocol == htons(ETH_P_IP))
proto = AF_INET;
else if (skb->protocol == htons(ETH_P_IPV6) &&
skb->sk->sk_family == AF_INET6)
proto = AF_INET6;
else
return;
afinfo = xfrm_state_get_afinfo(proto);
if (afinfo) {
afinfo->local_error(skb, mtu);
rcu_read_unlock();
}
}
EXPORT_SYMBOL_GPL(xfrm_local_error);
| linux-master | net/xfrm/xfrm_output.c |
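The TOS handling in xfrm4_tunnel_encap_add() combines three knobs: XFRM_SA_XFLAG_DONT_ENCAP_DSCP suppresses the inner DSCP, INET_ECN_encapsulate() re-derives the outer ECN bits (CE is encapsulated as ECT(0)), and XFRM_STATE_NOECN clears ECN entirely. The standalone sketch below reproduces that derivation; the ECN_* constants mirror include/net/inet_ecn.h, everything else is illustrative.
/* toy_outer_tos.c - standalone userspace sketch (not kernel code) of how
 * the outer IPv4 TOS byte is derived in xfrm4_tunnel_encap_add() above:
 * the inner DSCP is copied or suppressed (XFRM_SA_XFLAG_DONT_ENCAP_DSCP),
 * the ECN bits are re-encapsulated (CE becomes ECT(0) on the outer
 * header), and XFRM_STATE_NOECN clears ECN entirely.
 * Build: cc -o toy_outer_tos toy_outer_tos.c
 */
#include <stdint.h>
#include <stdio.h>

#define ECN_MASK  0x03
#define ECN_ECT_0 0x02
#define ECN_CE    0x03

static uint8_t ecn_encapsulate(uint8_t outer, uint8_t inner)
{
    outer &= ~ECN_MASK;
    outer |= ((inner & ECN_MASK) == ECN_CE) ? ECN_ECT_0 : (inner & ECN_MASK);
    return outer;
}

static uint8_t outer_tos(uint8_t inner_tos, int dont_encap_dscp, int noecn)
{
    uint8_t tos = dont_encap_dscp ? 0 : inner_tos;

    tos = ecn_encapsulate(tos, inner_tos);
    if (noecn)
        tos &= ~ECN_MASK;
    return tos;
}

int main(void)
{
    uint8_t inner = 0xa0 | ECN_CE;    /* sample DSCP, CE-marked inner packet */

    printf("copy dscp: 0x%02x\n", (unsigned)outer_tos(inner, 0, 0));
    printf("hide dscp: 0x%02x\n", (unsigned)outer_tos(inner, 1, 0));
    printf("noecn:     0x%02x\n", (unsigned)outer_tos(inner, 0, 1));
    return 0;
}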
// SPDX-License-Identifier: GPL-2.0-only
/* Unstable XFRM Helpers for TC-BPF hook
*
* These are called from SCHED_CLS BPF programs. Note that it is
* allowed to break compatibility for these functions since the interface they
* are exposed through to BPF programs is explicitly unstable.
*/
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
/* bpf_xfrm_info - XFRM metadata information
*
* Members:
* @if_id - XFRM if_id:
* Transmit: if_id to be used in policy and state lookups
* Receive: if_id of the state matched for the incoming packet
* @link - Underlying device ifindex:
* Transmit: used as the underlying device in VRF routing
* Receive: the device on which the packet had been received
*/
struct bpf_xfrm_info {
u32 if_id;
int link;
};
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in xfrm_interface BTF");
/* bpf_skb_get_xfrm_info - Get XFRM metadata
*
* Parameters:
* @skb_ctx - Pointer to ctx (__sk_buff) in TC program
* Cannot be NULL
* @to - Pointer to memory to which the metadata will be copied
* Cannot be NULL
*/
__bpf_kfunc int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct xfrm_md_info *info;
info = skb_xfrm_md_info(skb);
if (!info)
return -EINVAL;
to->if_id = info->if_id;
to->link = info->link;
return 0;
}
/* bpf_skb_set_xfrm_info - Set XFRM metadata
*
* Parameters:
* @skb_ctx - Pointer to ctx (__sk_buff) in TC program
* Cannot be NULL
* @from - Pointer to memory from which the metadata will be copied
* Cannot be NULL
*/
__bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bpf_xfrm_info *from)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct metadata_dst *md_dst;
struct xfrm_md_info *info;
if (unlikely(skb_metadata_dst(skb)))
return -EINVAL;
if (!xfrm_bpf_md_dst) {
struct metadata_dst __percpu *tmp;
tmp = metadata_dst_alloc_percpu(0, METADATA_XFRM, GFP_ATOMIC);
if (!tmp)
return -ENOMEM;
if (cmpxchg(&xfrm_bpf_md_dst, NULL, tmp))
metadata_dst_free_percpu(tmp);
}
md_dst = this_cpu_ptr(xfrm_bpf_md_dst);
info = &md_dst->u.xfrm_info;
info->if_id = from->if_id;
info->link = from->link;
skb_dst_force(skb);
info->dst_orig = skb_dst(skb);
dst_hold((struct dst_entry *)md_dst);
skb_dst_set(skb, (struct dst_entry *)md_dst);
return 0;
}
__diag_pop()
BTF_SET8_START(xfrm_ifc_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
BTF_ID_FLAGS(func, bpf_skb_set_xfrm_info)
BTF_SET8_END(xfrm_ifc_kfunc_set)
static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = {
.owner = THIS_MODULE,
.set = &xfrm_ifc_kfunc_set,
};
int __init register_xfrm_interface_bpf(void)
{
return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
&xfrm_interface_kfunc_set);
}
| linux-master | net/xfrm/xfrm_interface_bpf.c |
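From the BPF side, the two kfuncs registered above are used like ordinary kernel functions declared with __ksym. The following is a sketch of how a SCHED_CLS program might call bpf_skb_set_xfrm_info(); the local struct definition, the extern declarations, the file name and the if_id value are assumptions for the illustration (a real program would typically pull the types from vmlinux.h), not a kernel selftest.
/* xfrm_set_ifid.bpf.c - illustrative SCHED_CLS program (assumed names). */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct bpf_xfrm_info {
    __u32 if_id;
    int link;
};

extern int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx,
                                 struct bpf_xfrm_info *to) __ksym;
extern int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx,
                                 const struct bpf_xfrm_info *from) __ksym;

SEC("tc")
int set_xfrm_if_id(struct __sk_buff *skb)
{
    /* Steer the packet into XFRM if_id 42 on transmit. */
    struct bpf_xfrm_info info = { .if_id = 42, .link = 0 };

    if (bpf_skb_set_xfrm_info(skb, &info))
        return TC_ACT_SHOT;
    return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";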
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* xfrm_device.c - IPsec device offloading code.
*
* Copyright (c) 2015 secunet Security Networks AG
*
* Author:
* Steffen Klassert <[email protected]>
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/gso.h>
#include <net/xfrm.h>
#include <linux/notifier.h>
#ifdef CONFIG_XFRM_OFFLOAD
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
unsigned int hsize)
{
struct xfrm_offload *xo = xfrm_offload(skb);
skb_reset_mac_len(skb);
if (xo->flags & XFRM_GSO_SEGMENT)
skb->transport_header -= x->props.header_len;
pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
unsigned int hsize)
{
struct xfrm_offload *xo = xfrm_offload(skb);
if (xo->flags & XFRM_GSO_SEGMENT)
skb->transport_header = skb->network_header + hsize;
skb_reset_mac_len(skb);
pskb_pull(skb, skb->mac_len + x->props.header_len);
}
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
unsigned int hsize)
{
struct xfrm_offload *xo = xfrm_offload(skb);
int phlen = 0;
if (xo->flags & XFRM_GSO_SEGMENT)
skb->transport_header = skb->network_header + hsize;
skb_reset_mac_len(skb);
if (x->sel.family != AF_INET6) {
phlen = IPV4_BEET_PHMAXLEN;
if (x->outer_mode.family == AF_INET6)
phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
}
pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}
/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
switch (x->outer_mode.encap) {
case XFRM_MODE_TUNNEL:
if (x->outer_mode.family == AF_INET)
return __xfrm_mode_tunnel_prep(x, skb,
sizeof(struct iphdr));
if (x->outer_mode.family == AF_INET6)
return __xfrm_mode_tunnel_prep(x, skb,
sizeof(struct ipv6hdr));
break;
case XFRM_MODE_TRANSPORT:
if (x->outer_mode.family == AF_INET)
return __xfrm_transport_prep(x, skb,
sizeof(struct iphdr));
if (x->outer_mode.family == AF_INET6)
return __xfrm_transport_prep(x, skb,
sizeof(struct ipv6hdr));
break;
case XFRM_MODE_BEET:
if (x->outer_mode.family == AF_INET)
return __xfrm_mode_beet_prep(x, skb,
sizeof(struct iphdr));
if (x->outer_mode.family == AF_INET6)
return __xfrm_mode_beet_prep(x, skb,
sizeof(struct ipv6hdr));
break;
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_IN_TRIGGER:
break;
}
}
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
struct xfrm_offload *xo = xfrm_offload(skb);
__u32 seq = xo->seq.low;
seq += skb_shinfo(skb)->gso_segs;
if (unlikely(seq < xo->seq.low))
return true;
return false;
}
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
int err;
unsigned long flags;
struct xfrm_state *x;
struct softnet_data *sd;
struct sk_buff *skb2, *nskb, *pskb = NULL;
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
struct net_device *dev = skb->dev;
struct sec_path *sp;
if (!xo || (xo->flags & XFRM_XMIT))
return skb;
if (!(features & NETIF_F_HW_ESP))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
sp = skb_sec_path(skb);
x = sp->xvec[sp->len - 1];
if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
return skb;
/* The packet was sent to the HW IPsec packet offload engine,
* but to the wrong device. Drop the packet, so it won't skip
* the XFRM stack.
*/
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
kfree_skb(skb);
dev_core_stats_tx_dropped_inc(dev);
return NULL;
}
/* This skb was already validated on the upper/virtual dev */
if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
return skb;
local_irq_save(flags);
sd = this_cpu_ptr(&softnet_data);
err = !skb_queue_empty(&sd->xfrm_backlog);
local_irq_restore(flags);
if (err) {
*again = true;
return skb;
}
if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
unlikely(xmit_xfrm_check_overflow(skb)))) {
struct sk_buff *segs;
/* Packet got rerouted, fixup features and segment it. */
esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);
segs = skb_gso_segment(skb, esp_features);
if (IS_ERR(segs)) {
kfree_skb(skb);
dev_core_stats_tx_dropped_inc(dev);
return NULL;
} else {
consume_skb(skb);
skb = segs;
}
}
if (!skb->next) {
esp_features |= skb->dev->gso_partial_features;
xfrm_outer_mode_prep(x, skb);
xo->flags |= XFRM_DEV_RESUME;
err = x->type_offload->xmit(x, skb, esp_features);
if (err) {
if (err == -EINPROGRESS)
return NULL;
XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
kfree_skb(skb);
return NULL;
}
skb_push(skb, skb->data - skb_mac_header(skb));
return skb;
}
skb_list_walk_safe(skb, skb2, nskb) {
esp_features |= skb->dev->gso_partial_features;
skb_mark_not_on_list(skb2);
xo = xfrm_offload(skb2);
xo->flags |= XFRM_DEV_RESUME;
xfrm_outer_mode_prep(x, skb2);
err = x->type_offload->xmit(x, skb2, esp_features);
if (!err) {
skb2->next = nskb;
} else if (err != -EINPROGRESS) {
XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
skb2->next = nskb;
kfree_skb_list(skb2);
return NULL;
} else {
if (skb == skb2)
skb = nskb;
else
pskb->next = nskb;
continue;
}
skb_push(skb2, skb2->data - skb_mac_header(skb2));
pskb = skb2;
}
return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_user_offload *xuo,
struct netlink_ext_ack *extack)
{
int err;
struct dst_entry *dst;
struct net_device *dev;
struct xfrm_dev_offload *xso = &x->xso;
xfrm_address_t *saddr;
xfrm_address_t *daddr;
bool is_packet_offload;
if (!x->type_offload) {
NL_SET_ERR_MSG(extack, "Type doesn't support offload");
return -EINVAL;
}
if (xuo->flags &
~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
return -EINVAL;
}
is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
/* We don't yet support UDP encapsulation and TFC padding. */
if ((!is_packet_offload && x->encap) || x->tfcpad) {
NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
return -EINVAL;
}
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev) {
if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
saddr = &x->props.saddr;
daddr = &x->id.daddr;
} else {
saddr = &x->id.daddr;
daddr = &x->props.saddr;
}
dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
x->props.family,
xfrm_smark_get(0, x));
if (IS_ERR(dst))
return (is_packet_offload) ? -EINVAL : 0;
dev = dst->dev;
dev_hold(dev);
dst_release(dst);
}
if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
xso->dev = NULL;
dev_put(dev);
return (is_packet_offload) ? -EINVAL : 0;
}
if (!is_packet_offload && x->props.flags & XFRM_STATE_ESN &&
!dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
xso->dev = NULL;
dev_put(dev);
return -EINVAL;
}
xso->dev = dev;
netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
xso->real_dev = dev;
if (xuo->flags & XFRM_OFFLOAD_INBOUND)
xso->dir = XFRM_DEV_OFFLOAD_IN;
else
xso->dir = XFRM_DEV_OFFLOAD_OUT;
if (is_packet_offload)
xso->type = XFRM_DEV_OFFLOAD_PACKET;
else
xso->type = XFRM_DEV_OFFLOAD_CRYPTO;
err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack);
if (err) {
xso->dev = NULL;
xso->dir = 0;
xso->real_dev = NULL;
netdev_put(dev, &xso->dev_tracker);
xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
/* The user explicitly requested packet offload mode and configured
* a policy in addition to the XFRM state, so be civil to users
* and return an error instead of taking the fallback path.
*
* This WARN_ON() serves as documentation for driver authors
* not to return -EOPNOTSUPP in packet offload mode.
*/
WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
if (err != -EOPNOTSUPP || is_packet_offload) {
NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
return err;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
struct xfrm_user_offload *xuo, u8 dir,
struct netlink_ext_ack *extack)
{
struct xfrm_dev_offload *xdo = &xp->xdo;
struct net_device *dev;
int err;
if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
/* Only packet offload mode is supported, which means the
* user must set the XFRM_OFFLOAD_PACKET bit.
*/
NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
return -EINVAL;
}
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev)
return -EINVAL;
if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
xdo->dev = NULL;
dev_put(dev);
NL_SET_ERR_MSG(extack, "Policy offload is not supported");
return -EINVAL;
}
xdo->dev = dev;
netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
xdo->real_dev = dev;
xdo->type = XFRM_DEV_OFFLOAD_PACKET;
switch (dir) {
case XFRM_POLICY_IN:
xdo->dir = XFRM_DEV_OFFLOAD_IN;
break;
case XFRM_POLICY_OUT:
xdo->dir = XFRM_DEV_OFFLOAD_OUT;
break;
case XFRM_POLICY_FWD:
xdo->dir = XFRM_DEV_OFFLOAD_FWD;
break;
default:
xdo->dev = NULL;
netdev_put(dev, &xdo->dev_tracker);
NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
return -EINVAL;
}
err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack);
if (err) {
xdo->dev = NULL;
xdo->real_dev = NULL;
xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
xdo->dir = 0;
netdev_put(dev, &xdo->dev_tracker);
NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this policy");
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
int mtu;
struct dst_entry *dst = skb_dst(skb);
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct net_device *dev = x->xso.dev;
if (!x->type_offload || x->encap)
return false;
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
!xdst->child->xfrm)) {
mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
if (skb->len <= mtu)
goto ok;
if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
goto ok;
}
return false;
ok:
if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);
return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
void xfrm_dev_resume(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
int ret = NETDEV_TX_BUSY;
struct netdev_queue *txq;
struct softnet_data *sd;
unsigned long flags;
rcu_read_lock();
txq = netdev_core_pick_tx(dev, skb, NULL);
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
HARD_TX_UNLOCK(dev, txq);
if (!dev_xmit_complete(ret)) {
local_irq_save(flags);
sd = this_cpu_ptr(&softnet_data);
skb_queue_tail(&sd->xfrm_backlog, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
void xfrm_dev_backlog(struct softnet_data *sd)
{
struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
struct sk_buff_head list;
struct sk_buff *skb;
if (skb_queue_empty(xfrm_backlog))
return;
__skb_queue_head_init(&list);
spin_lock(&xfrm_backlog->lock);
skb_queue_splice_init(xfrm_backlog, &list);
spin_unlock(&xfrm_backlog->lock);
while (!skb_queue_empty(&list)) {
skb = __skb_dequeue(&list);
xfrm_dev_resume(skb);
}
}
#endif
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
!(dev->features & NETIF_F_HW_ESP))
return NOTIFY_BAD;
if ((dev->features & NETIF_F_HW_ESP) &&
(!(dev->xfrmdev_ops &&
dev->xfrmdev_ops->xdo_dev_state_add &&
dev->xfrmdev_ops->xdo_dev_state_delete)))
return NOTIFY_BAD;
#else
if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
return NOTIFY_BAD;
#endif
return NOTIFY_DONE;
}
static int xfrm_dev_down(struct net_device *dev)
{
if (dev->features & NETIF_F_HW_ESP) {
xfrm_dev_state_flush(dev_net(dev), dev, true);
xfrm_dev_policy_flush(dev_net(dev), dev, true);
}
return NOTIFY_DONE;
}
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_REGISTER:
return xfrm_api_check(dev);
case NETDEV_FEAT_CHANGE:
return xfrm_api_check(dev);
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
return xfrm_dev_down(dev);
}
return NOTIFY_DONE;
}
static struct notifier_block xfrm_dev_notifier = {
.notifier_call = xfrm_dev_event,
};
void __init xfrm_dev_init(void)
{
register_netdevice_notifier(&xfrm_dev_notifier);
}
| linux-master | net/xfrm/xfrm_device.c |
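xmit_xfrm_check_overflow() refuses to hand a GSO skb to the hardware when consuming gso_segs sequence numbers would wrap the low 32 bits of the output sequence counter; such skbs are software-segmented instead. The wrap test itself is just unsigned overflow detection, sketched below as a standalone program with illustrative names.
/* toy_seq_overflow.c - standalone userspace sketch (not kernel code) of the
 * wrap test in xmit_xfrm_check_overflow() above: adding gso_segs sequence
 * numbers to the low 32 bits of the output counter must not wrap, otherwise
 * the GSO skb has to be software-segmented before hitting the hardware.
 * Build: cc -o toy_seq_overflow toy_seq_overflow.c
 */
#include <stdint.h>
#include <stdio.h>

static int seq_would_overflow(uint32_t seq_low, uint16_t gso_segs)
{
    uint32_t seq = seq_low + gso_segs;

    return seq < seq_low;    /* unsigned wrap-around */
}

int main(void)
{
    printf("%d %d\n",
           seq_would_overflow(0xfffffff0u, 8),     /* fits  -> 0 */
           seq_would_overflow(0xfffffff0u, 32));   /* wraps -> 1 */
    return 0;
}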
// SPDX-License-Identifier: GPL-2.0
#include <net/tcp.h>
#include <net/strparser.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <net/espintcp.h>
#include <linux/skmsg.h>
#include <net/inet_common.h>
#include <trace/events/sock.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
struct sock *sk)
{
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, skb, skb->truesize)) {
XFRM_INC_STATS(sock_net(sk), LINUX_MIB_XFRMINERROR);
kfree_skb(skb);
return;
}
skb_set_owner_r(skb, sk);
memset(skb->cb, 0, sizeof(skb->cb));
skb_queue_tail(&ctx->ike_queue, skb);
ctx->saved_data_ready(sk);
}
static void handle_esp(struct sk_buff *skb, struct sock *sk)
{
struct tcp_skb_cb *tcp_cb = (struct tcp_skb_cb *)skb->cb;
skb_reset_transport_header(skb);
/* restore IP CB, we need at least IP6CB->nhoff */
memmove(skb->cb, &tcp_cb->header, sizeof(tcp_cb->header));
rcu_read_lock();
skb->dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
local_bh_disable();
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
ipv6_stub->xfrm6_rcv_encap(skb, IPPROTO_ESP, 0, TCP_ENCAP_ESPINTCP);
else
#endif
xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, TCP_ENCAP_ESPINTCP);
local_bh_enable();
rcu_read_unlock();
}
static void espintcp_rcv(struct strparser *strp, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx,
strp);
struct strp_msg *rxm = strp_msg(skb);
int len = rxm->full_len - 2;
u32 nonesp_marker;
int err;
/* keepalive packet? */
if (unlikely(len == 1)) {
u8 data;
err = skb_copy_bits(skb, rxm->offset + 2, &data, 1);
if (err < 0) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
kfree_skb(skb);
return;
}
if (data == 0xff) {
kfree_skb(skb);
return;
}
}
/* drop other short messages */
if (unlikely(len <= sizeof(nonesp_marker))) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
kfree_skb(skb);
return;
}
err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker,
sizeof(nonesp_marker));
if (err < 0) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
kfree_skb(skb);
return;
}
/* remove header, leave non-ESP marker/SPI */
if (!pskb_pull(skb, rxm->offset + 2)) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);
kfree_skb(skb);
return;
}
if (pskb_trim(skb, rxm->full_len - 2) != 0) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);
kfree_skb(skb);
return;
}
if (nonesp_marker == 0)
handle_nonesp(ctx, skb, strp->sk);
else
handle_esp(skb, strp->sk);
}
static int espintcp_parse(struct strparser *strp, struct sk_buff *skb)
{
struct strp_msg *rxm = strp_msg(skb);
__be16 blen;
u16 len;
int err;
if (skb->len < rxm->offset + 2)
return 0;
err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen));
if (err < 0)
return err;
len = be16_to_cpu(blen);
if (len < 2)
return -EINVAL;
return len;
}
static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
struct sk_buff *skb;
int err = 0;
int copied;
int off = 0;
skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
if (!skb) {
if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
copied = len;
if (copied > skb->len)
copied = skb->len;
else if (copied < skb->len)
msg->msg_flags |= MSG_TRUNC;
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (unlikely(err)) {
kfree_skb(skb);
return err;
}
if (flags & MSG_TRUNC)
copied = skb->len;
kfree_skb(skb);
return copied;
}
int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
return -ENOBUFS;
__skb_queue_tail(&ctx->out_queue, skb);
return 0;
}
EXPORT_SYMBOL_GPL(espintcp_queue_out);
/* espintcp length field is 2B and length includes the length field's size */
#define MAX_ESPINTCP_MSG (((1 << 16) - 1) - 2)
static int espintcp_sendskb_locked(struct sock *sk, struct espintcp_msg *emsg,
int flags)
{
do {
int ret;
ret = skb_send_sock_locked(sk, emsg->skb,
emsg->offset, emsg->len);
if (ret < 0)
return ret;
emsg->len -= ret;
emsg->offset += ret;
} while (emsg->len > 0);
kfree_skb(emsg->skb);
memset(emsg, 0, sizeof(*emsg));
return 0;
}
static int espintcp_sendskmsg_locked(struct sock *sk,
struct espintcp_msg *emsg, int flags)
{
struct msghdr msghdr = {
.msg_flags = flags | MSG_SPLICE_PAGES | MSG_MORE,
};
struct sk_msg *skmsg = &emsg->skmsg;
bool more = flags & MSG_MORE;
struct scatterlist *sg;
int done = 0;
int ret;
sg = &skmsg->sg.data[skmsg->sg.start];
do {
struct bio_vec bvec;
size_t size = sg->length - emsg->offset;
int offset = sg->offset + emsg->offset;
struct page *p;
emsg->offset = 0;
if (sg_is_last(sg) && !more)
msghdr.msg_flags &= ~MSG_MORE;
p = sg_page(sg);
retry:
bvec_set_page(&bvec, p, size, offset);
iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
ret = tcp_sendmsg_locked(sk, &msghdr, size);
if (ret < 0) {
emsg->offset = offset - sg->offset;
skmsg->sg.start += done;
return ret;
}
if (ret != size) {
offset += ret;
size -= ret;
goto retry;
}
done++;
put_page(p);
sk_mem_uncharge(sk, sg->length);
sg = sg_next(sg);
} while (sg);
memset(emsg, 0, sizeof(*emsg));
return 0;
}
static int espintcp_push_msgs(struct sock *sk, int flags)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
struct espintcp_msg *emsg = &ctx->partial;
int err;
if (!emsg->len)
return 0;
if (ctx->tx_running)
return -EAGAIN;
ctx->tx_running = 1;
if (emsg->skb)
err = espintcp_sendskb_locked(sk, emsg, flags);
else
err = espintcp_sendskmsg_locked(sk, emsg, flags);
if (err == -EAGAIN) {
ctx->tx_running = 0;
return flags & MSG_DONTWAIT ? -EAGAIN : 0;
}
if (!err)
memset(emsg, 0, sizeof(*emsg));
ctx->tx_running = 0;
return err;
}
int espintcp_push_skb(struct sock *sk, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
struct espintcp_msg *emsg = &ctx->partial;
unsigned int len;
int offset;
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ECONNRESET;
}
offset = skb_transport_offset(skb);
len = skb->len - offset;
espintcp_push_msgs(sk, 0);
if (emsg->len) {
kfree_skb(skb);
return -ENOBUFS;
}
skb_set_owner_w(skb, sk);
emsg->offset = offset;
emsg->len = len;
emsg->skb = skb;
espintcp_push_msgs(sk, 0);
return 0;
}
EXPORT_SYMBOL_GPL(espintcp_push_skb);
static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
struct espintcp_ctx *ctx = espintcp_getctx(sk);
struct espintcp_msg *emsg = &ctx->partial;
struct iov_iter pfx_iter;
struct kvec pfx_iov = {};
size_t msglen = size + 2;
char buf[2] = {0};
int err, end;
if (msg->msg_flags & ~MSG_DONTWAIT)
return -EOPNOTSUPP;
if (size > MAX_ESPINTCP_MSG)
return -EMSGSIZE;
if (msg->msg_controllen)
return -EOPNOTSUPP;
lock_sock(sk);
err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT);
if (err < 0) {
if (err != -EAGAIN || !(msg->msg_flags & MSG_DONTWAIT))
err = -ENOBUFS;
goto unlock;
}
sk_msg_init(&emsg->skmsg);
while (1) {
/* only -ENOMEM is possible since we don't coalesce */
err = sk_msg_alloc(sk, &emsg->skmsg, msglen, 0);
if (!err)
break;
err = sk_stream_wait_memory(sk, &timeo);
if (err)
goto fail;
}
*((__be16 *)buf) = cpu_to_be16(msglen);
pfx_iov.iov_base = buf;
pfx_iov.iov_len = sizeof(buf);
iov_iter_kvec(&pfx_iter, ITER_SOURCE, &pfx_iov, 1, pfx_iov.iov_len);
err = sk_msg_memcopy_from_iter(sk, &pfx_iter, &emsg->skmsg,
pfx_iov.iov_len);
if (err < 0)
goto fail;
err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, &emsg->skmsg, size);
if (err < 0)
goto fail;
end = emsg->skmsg.sg.end;
emsg->len = size;
sk_msg_iter_var_prev(end);
sg_mark_end(sk_msg_elem(&emsg->skmsg, end));
tcp_rate_check_app_limited(sk);
err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT);
/* this message could be partially sent, keep it */
release_sock(sk);
return size;
fail:
sk_msg_free(sk, &emsg->skmsg);
memset(emsg, 0, sizeof(*emsg));
unlock:
release_sock(sk);
return err;
}
static struct proto espintcp_prot __ro_after_init;
static struct proto_ops espintcp_ops __ro_after_init;
static struct proto espintcp6_prot;
static struct proto_ops espintcp6_ops;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static void espintcp_data_ready(struct sock *sk)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
trace_sk_data_ready(sk);
strp_data_ready(&ctx->strp);
}
static void espintcp_tx_work(struct work_struct *work)
{
struct espintcp_ctx *ctx = container_of(work,
struct espintcp_ctx, work);
struct sock *sk = ctx->strp.sk;
lock_sock(sk);
if (!ctx->tx_running)
espintcp_push_msgs(sk, 0);
release_sock(sk);
}
static void espintcp_write_space(struct sock *sk)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
schedule_work(&ctx->work);
ctx->saved_write_space(sk);
}
static void espintcp_destruct(struct sock *sk)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
ctx->saved_destruct(sk);
kfree(ctx);
}
bool tcp_is_ulp_esp(struct sock *sk)
{
return sk->sk_prot == &espintcp_prot || sk->sk_prot == &espintcp6_prot;
}
EXPORT_SYMBOL_GPL(tcp_is_ulp_esp);
static void build_protos(struct proto *espintcp_prot,
struct proto_ops *espintcp_ops,
const struct proto *orig_prot,
const struct proto_ops *orig_ops);
static int espintcp_init_sk(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct strp_callbacks cb = {
.rcv_msg = espintcp_rcv,
.parse_msg = espintcp_parse,
};
struct espintcp_ctx *ctx;
int err;
/* sockmap is not compatible with espintcp */
if (sk->sk_user_data)
return -EBUSY;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
err = strp_init(&ctx->strp, sk, &cb);
if (err)
goto free;
__sk_dst_reset(sk);
strp_check_rcv(&ctx->strp);
skb_queue_head_init(&ctx->ike_queue);
skb_queue_head_init(&ctx->out_queue);
if (sk->sk_family == AF_INET) {
sk->sk_prot = &espintcp_prot;
sk->sk_socket->ops = &espintcp_ops;
} else {
mutex_lock(&tcpv6_prot_mutex);
if (!espintcp6_prot.recvmsg)
build_protos(&espintcp6_prot, &espintcp6_ops, sk->sk_prot, sk->sk_socket->ops);
mutex_unlock(&tcpv6_prot_mutex);
sk->sk_prot = &espintcp6_prot;
sk->sk_socket->ops = &espintcp6_ops;
}
ctx->saved_data_ready = sk->sk_data_ready;
ctx->saved_write_space = sk->sk_write_space;
ctx->saved_destruct = sk->sk_destruct;
sk->sk_data_ready = espintcp_data_ready;
sk->sk_write_space = espintcp_write_space;
sk->sk_destruct = espintcp_destruct;
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
INIT_WORK(&ctx->work, espintcp_tx_work);
/* avoid using task_frag */
sk->sk_allocation = GFP_ATOMIC;
sk->sk_use_task_frag = false;
return 0;
free:
kfree(ctx);
return err;
}
static void espintcp_release(struct sock *sk)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
struct sk_buff_head queue;
struct sk_buff *skb;
__skb_queue_head_init(&queue);
skb_queue_splice_init(&ctx->out_queue, &queue);
while ((skb = __skb_dequeue(&queue)))
espintcp_push_skb(sk, skb);
tcp_release_cb(sk);
}
static void espintcp_close(struct sock *sk, long timeout)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
struct espintcp_msg *emsg = &ctx->partial;
strp_stop(&ctx->strp);
sk->sk_prot = &tcp_prot;
barrier();
cancel_work_sync(&ctx->work);
strp_done(&ctx->strp);
skb_queue_purge(&ctx->out_queue);
skb_queue_purge(&ctx->ike_queue);
if (emsg->len) {
if (emsg->skb)
kfree_skb(emsg->skb);
else
sk_msg_free(sk, &emsg->skmsg);
}
tcp_close(sk, timeout);
}
static __poll_t espintcp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
__poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct espintcp_ctx *ctx = espintcp_getctx(sk);
if (!skb_queue_empty(&ctx->ike_queue))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
static void build_protos(struct proto *espintcp_prot,
struct proto_ops *espintcp_ops,
const struct proto *orig_prot,
const struct proto_ops *orig_ops)
{
memcpy(espintcp_prot, orig_prot, sizeof(struct proto));
memcpy(espintcp_ops, orig_ops, sizeof(struct proto_ops));
espintcp_prot->sendmsg = espintcp_sendmsg;
espintcp_prot->recvmsg = espintcp_recvmsg;
espintcp_prot->close = espintcp_close;
espintcp_prot->release_cb = espintcp_release;
espintcp_ops->poll = espintcp_poll;
}
static struct tcp_ulp_ops espintcp_ulp __read_mostly = {
.name = "espintcp",
.owner = THIS_MODULE,
.init = espintcp_init_sk,
};
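/* The ULP is selected from user space on a connected TCP socket, roughly
 * (illustrative, error handling omitted):
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "espintcp", sizeof("espintcp"));
 *
 * which reaches espintcp_init_sk() above through the kernel's TCP_ULP
 * setsockopt handling.
 */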
void __init espintcp_init(void)
{
build_protos(&espintcp_prot, &espintcp_ops, &tcp_prot, &inet_stream_ops);
tcp_register_ulp(&espintcp_ulp);
}
| linux-master | net/xfrm/espintcp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* xfrm algorithm interface
*
* Copyright (c) 2002 James Morris <[email protected]>
*/
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <net/xfrm.h>
#if IS_ENABLED(CONFIG_INET_ESP) || IS_ENABLED(CONFIG_INET6_ESP)
#include <net/esp.h>
#endif
/*
* Algorithms supported by IPsec. These entries contain properties which
* are used in key negotiation and xfrm processing, and are used to verify
* that instantiated crypto transforms have correct parameters for IPsec
* purposes.
*/
static struct xfrm_algo_desc aead_list[] = {
{
.name = "rfc4106(gcm(aes))",
.uinfo = {
.aead = {
.geniv = "seqiv",
.icv_truncbits = 64,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_AES_GCM_ICV8,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "rfc4106(gcm(aes))",
.uinfo = {
.aead = {
.geniv = "seqiv",
.icv_truncbits = 96,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_AES_GCM_ICV12,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "rfc4106(gcm(aes))",
.uinfo = {
.aead = {
.geniv = "seqiv",
.icv_truncbits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_AES_GCM_ICV16,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "rfc4309(ccm(aes))",
.uinfo = {
.aead = {
.geniv = "seqiv",
.icv_truncbits = 64,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_AES_CCM_ICV8,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "rfc4309(ccm(aes))",
.uinfo = {
.aead = {
.geniv = "seqiv",
.icv_truncbits = 96,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_AES_CCM_ICV12,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "rfc4309(ccm(aes))",
.uinfo = {
.aead = {
.geniv = "seqiv",
.icv_truncbits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_AES_CCM_ICV16,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "rfc4543(gcm(aes))",
.uinfo = {
.aead = {
.geniv = "seqiv",
.icv_truncbits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "rfc7539esp(chacha20,poly1305)",
.uinfo = {
.aead = {
.geniv = "seqiv",
.icv_truncbits = 128,
}
},
.pfkey_supported = 0,
},
};
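/* Integrity-only (authentication) algorithms.  In each entry,
 * uinfo.auth.icv_fullbits is the full digest size of the underlying hash and
 * icv_truncbits the truncated ICV length actually placed on the wire, e.g.
 * hmac(sha1) below produces 160 bits but transmits 96.
 */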
static struct xfrm_algo_desc aalg_list[] = {
{
.name = "digest_null",
.uinfo = {
.auth = {
.icv_truncbits = 0,
.icv_fullbits = 0,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_AALG_NULL,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 0,
.sadb_alg_maxbits = 0
}
},
{
.name = "hmac(md5)",
.compat = "md5",
.uinfo = {
.auth = {
.icv_truncbits = 96,
.icv_fullbits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_AALG_MD5HMAC,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 128
}
},
{
.name = "hmac(sha1)",
.compat = "sha1",
.uinfo = {
.auth = {
.icv_truncbits = 96,
.icv_fullbits = 160,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_AALG_SHA1HMAC,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 160,
.sadb_alg_maxbits = 160
}
},
{
.name = "hmac(sha256)",
.compat = "sha256",
.uinfo = {
.auth = {
.icv_truncbits = 96,
.icv_fullbits = 256,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 256,
.sadb_alg_maxbits = 256
}
},
{
.name = "hmac(sha384)",
.uinfo = {
.auth = {
.icv_truncbits = 192,
.icv_fullbits = 384,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_AALG_SHA2_384HMAC,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 384,
.sadb_alg_maxbits = 384
}
},
{
.name = "hmac(sha512)",
.uinfo = {
.auth = {
.icv_truncbits = 256,
.icv_fullbits = 512,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_AALG_SHA2_512HMAC,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 512,
.sadb_alg_maxbits = 512
}
},
{
.name = "hmac(rmd160)",
.compat = "rmd160",
.uinfo = {
.auth = {
.icv_truncbits = 96,
.icv_fullbits = 160,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 160,
.sadb_alg_maxbits = 160
}
},
{
.name = "xcbc(aes)",
.uinfo = {
.auth = {
.icv_truncbits = 96,
.icv_fullbits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 128
}
},
{
/* rfc4494 */
.name = "cmac(aes)",
.uinfo = {
.auth = {
.icv_truncbits = 96,
.icv_fullbits = 128,
}
},
.pfkey_supported = 0,
},
{
.name = "hmac(sm3)",
.compat = "sm3",
.uinfo = {
.auth = {
.icv_truncbits = 256,
.icv_fullbits = 256,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_AALG_SM3_256HMAC,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 256,
.sadb_alg_maxbits = 256
}
},
};
static struct xfrm_algo_desc ealg_list[] = {
{
.name = "ecb(cipher_null)",
.compat = "cipher_null",
.uinfo = {
.encr = {
.blockbits = 8,
.defkeybits = 0,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_EALG_NULL,
.sadb_alg_ivlen = 0,
.sadb_alg_minbits = 0,
.sadb_alg_maxbits = 0
}
},
{
.name = "cbc(des)",
.compat = "des",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 64,
.defkeybits = 64,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_EALG_DESCBC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 64,
.sadb_alg_maxbits = 64
}
},
{
.name = "cbc(des3_ede)",
.compat = "des3_ede",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 64,
.defkeybits = 192,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_EALG_3DESCBC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 192,
.sadb_alg_maxbits = 192
}
},
{
.name = "cbc(cast5)",
.compat = "cast5",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 64,
.defkeybits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_CASTCBC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 40,
.sadb_alg_maxbits = 128
}
},
{
.name = "cbc(blowfish)",
.compat = "blowfish",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 64,
.defkeybits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 40,
.sadb_alg_maxbits = 448
}
},
{
.name = "cbc(aes)",
.compat = "aes",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 128,
.defkeybits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_AESCBC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "cbc(serpent)",
.compat = "serpent",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 128,
.defkeybits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256,
}
},
{
.name = "cbc(camellia)",
.compat = "camellia",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 128,
.defkeybits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "cbc(twofish)",
.compat = "twofish",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 128,
.defkeybits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
{
.name = "rfc3686(ctr(aes))",
.uinfo = {
.encr = {
.geniv = "seqiv",
.blockbits = 128,
.defkeybits = 160, /* 128-bit key + 32-bit nonce */
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_AESCTR,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 160,
.sadb_alg_maxbits = 288
}
},
{
.name = "cbc(sm4)",
.compat = "sm4",
.uinfo = {
.encr = {
.geniv = "echainiv",
.blockbits = 128,
.defkeybits = 128,
}
},
.pfkey_supported = 1,
.desc = {
.sadb_alg_id = SADB_X_EALG_SM4CBC,
.sadb_alg_ivlen = 16,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
};
static struct xfrm_algo_desc calg_list[] = {
{
.name = "deflate",
.uinfo = {
.comp = {
.threshold = 90,
}
},
.pfkey_supported = 1,
.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
.name = "lzs",
.uinfo = {
.comp = {
.threshold = 90,
}
},
.pfkey_supported = 1,
.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
.name = "lzjh",
.uinfo = {
.comp = {
.threshold = 50,
}
},
.pfkey_supported = 1,
.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
static inline int aalg_entries(void)
{
return ARRAY_SIZE(aalg_list);
}
static inline int ealg_entries(void)
{
return ARRAY_SIZE(ealg_list);
}
static inline int calg_entries(void)
{
return ARRAY_SIZE(calg_list);
}
struct xfrm_algo_list {
struct xfrm_algo_desc *algs;
int entries;
u32 type;
u32 mask;
};
static const struct xfrm_algo_list xfrm_aead_list = {
.algs = aead_list,
.entries = ARRAY_SIZE(aead_list),
.type = CRYPTO_ALG_TYPE_AEAD,
.mask = CRYPTO_ALG_TYPE_MASK,
};
static const struct xfrm_algo_list xfrm_aalg_list = {
.algs = aalg_list,
.entries = ARRAY_SIZE(aalg_list),
.type = CRYPTO_ALG_TYPE_HASH,
.mask = CRYPTO_ALG_TYPE_HASH_MASK,
};
static const struct xfrm_algo_list xfrm_ealg_list = {
.algs = ealg_list,
.entries = ARRAY_SIZE(ealg_list),
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.mask = CRYPTO_ALG_TYPE_MASK,
};
static const struct xfrm_algo_list xfrm_calg_list = {
.algs = calg_list,
.entries = ARRAY_SIZE(calg_list),
.type = CRYPTO_ALG_TYPE_COMPRESS,
.mask = CRYPTO_ALG_TYPE_MASK,
};
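/* Common list walk behind all the lookup helpers below.  With probe == 0 only
 * entries already marked ->available can match; with probe != 0 a not yet
 * available entry is checked against the crypto API via crypto_has_alg()
 * (which may trigger module autoload) and the result is cached in
 * ->available.
 */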
static struct xfrm_algo_desc *xfrm_find_algo(
const struct xfrm_algo_list *algo_list,
int match(const struct xfrm_algo_desc *entry, const void *data),
const void *data, int probe)
{
struct xfrm_algo_desc *list = algo_list->algs;
int i, status;
for (i = 0; i < algo_list->entries; i++) {
if (!match(list + i, data))
continue;
if (list[i].available)
return &list[i];
if (!probe)
break;
status = crypto_has_alg(list[i].name, algo_list->type,
algo_list->mask);
if (!status)
break;
list[i].available = status;
return &list[i];
}
return NULL;
}
static int xfrm_alg_id_match(const struct xfrm_algo_desc *entry,
const void *data)
{
return entry->desc.sadb_alg_id == (unsigned long)data;
}
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
return xfrm_find_algo(&xfrm_aalg_list, xfrm_alg_id_match,
(void *)(unsigned long)alg_id, 1);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
return xfrm_find_algo(&xfrm_ealg_list, xfrm_alg_id_match,
(void *)(unsigned long)alg_id, 1);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
return xfrm_find_algo(&xfrm_calg_list, xfrm_alg_id_match,
(void *)(unsigned long)alg_id, 1);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
static int xfrm_alg_name_match(const struct xfrm_algo_desc *entry,
const void *data)
{
const char *name = data;
return name && (!strcmp(name, entry->name) ||
(entry->compat && !strcmp(name, entry->compat)));
}
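/* Example lookups (illustrative callers, not part of this file): the
 * canonical name and the ->compat alias resolve to the same descriptor, so
 *
 *	xfrm_aalg_get_byname("hmac(sha256)", 1);
 *	xfrm_aalg_get_byname("sha256", 1);
 *
 * both return the hmac(sha256) entry (96-bit truncated ICV).  With
 * probe == 0 an entry is only returned once it has been marked ->available,
 * e.g. by an earlier xfrm_probe_algs() run.
 */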
struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe)
{
return xfrm_find_algo(&xfrm_aalg_list, xfrm_alg_name_match, name,
probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe)
{
return xfrm_find_algo(&xfrm_ealg_list, xfrm_alg_name_match, name,
probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe)
{
return xfrm_find_algo(&xfrm_calg_list, xfrm_alg_name_match, name,
probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
struct xfrm_aead_name {
const char *name;
int icvbits;
};
static int xfrm_aead_name_match(const struct xfrm_algo_desc *entry,
const void *data)
{
const struct xfrm_aead_name *aead = data;
const char *name = aead->name;
return aead->icvbits == entry->uinfo.aead.icv_truncbits && name &&
!strcmp(name, entry->name);
}
struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len, int probe)
{
struct xfrm_aead_name data = {
.name = name,
.icvbits = icv_len,
};
return xfrm_find_algo(&xfrm_aead_list, xfrm_aead_name_match, &data,
probe);
}
EXPORT_SYMBOL_GPL(xfrm_aead_get_byname);
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
if (idx >= aalg_entries())
return NULL;
return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);
struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
if (idx >= ealg_entries())
return NULL;
return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
/*
* Probe for the availability of crypto algorithms, and set the available
* flag for any algorithms found on the system. This is typically called by
* pfkey during userspace SA add, update or register.
*/
void xfrm_probe_algs(void)
{
int i, status;
BUG_ON(in_softirq());
for (i = 0; i < aalg_entries(); i++) {
status = crypto_has_ahash(aalg_list[i].name, 0, 0);
if (aalg_list[i].available != status)
aalg_list[i].available = status;
}
for (i = 0; i < ealg_entries(); i++) {
status = crypto_has_skcipher(ealg_list[i].name, 0, 0);
if (ealg_list[i].available != status)
ealg_list[i].available = status;
}
for (i = 0; i < calg_entries(); i++) {
status = crypto_has_comp(calg_list[i].name, 0,
CRYPTO_ALG_ASYNC);
if (calg_list[i].available != status)
calg_list[i].available = status;
}
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
int xfrm_count_pfkey_auth_supported(void)
{
int i, n;
for (i = 0, n = 0; i < aalg_entries(); i++)
if (aalg_list[i].available && aalg_list[i].pfkey_supported)
n++;
return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_pfkey_auth_supported);
int xfrm_count_pfkey_enc_supported(void)
{
int i, n;
for (i = 0, n = 0; i < ealg_entries(); i++)
if (ealg_list[i].available && ealg_list[i].pfkey_supported)
n++;
return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_pfkey_enc_supported);
MODULE_LICENSE("GPL");
| linux-master | net/xfrm/xfrm_algo.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xfrm_state.c
*
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <[email protected]>
* IPv6 support
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific functions
* Derek Atkins <[email protected]>
* Add UDP Encapsulation
*
*/
#include <linux/compat.h>
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <crypto/aead.h>
#include "xfrm_hash.h"
#define xfrm_state_deref_prot(table, net) \
rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
static void xfrm_state_gc_task(struct work_struct *work);
/* Each xfrm_state may be linked to two tables:
1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
2. Hash table by (daddr,family,reqid) to find what SAs exist for given
destination/tunnel endpoint. (output)
*/
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct kmem_cache *xfrm_state_cache __ro_after_init;
static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
return refcount_inc_not_zero(&x->refcnt);
}
static inline unsigned int xfrm_dst_hash(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u32 reqid,
unsigned short family)
{
return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}
static inline unsigned int xfrm_src_hash(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
unsigned short family)
{
return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}
static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
__be32 spi, u8 proto, unsigned short family)
{
return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}
static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
{
return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
}
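/* Insertion helper shared by the four state hash tables.  Packet-offload
 * (XFRM_DEV_OFFLOAD_PACKET) states are added at the head of a chain, while a
 * software state is normally inserted in front of the first existing software
 * entry; HW states therefore tend to stay grouped at the front, which lets
 * lookups that only care about one class stop walking early.
 */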
#define XFRM_STATE_INSERT(by, _n, _h, _type) \
{ \
struct xfrm_state *_x = NULL; \
\
if (_type != XFRM_DEV_OFFLOAD_PACKET) { \
hlist_for_each_entry_rcu(_x, _h, by) { \
if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
continue; \
break; \
} \
} \
\
if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
			/* SAD is empty or consists of HW SAs only */     \
hlist_add_head_rcu(_n, _h); \
else \
hlist_add_before_rcu(_n, &_x->by); \
}
static void xfrm_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
struct hlist_head *nsrctable,
struct hlist_head *nspitable,
struct hlist_head *nseqtable,
unsigned int nhashmask)
{
struct hlist_node *tmp;
struct xfrm_state *x;
hlist_for_each_entry_safe(x, tmp, list, bydst) {
unsigned int h;
h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family,
nhashmask);
XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);
h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
x->props.family,
nhashmask);
XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);
if (x->id.spi) {
h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
x->id.proto, x->props.family,
nhashmask);
XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
x->xso.type);
}
if (x->km.seq) {
h = __xfrm_seq_hash(x->km.seq, nhashmask);
XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
x->xso.type);
}
}
}
static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}
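/* Worker that doubles all four state hash tables.  The new tables are
 * populated under xfrm_state_lock with the xfrm_state_hash_generation
 * seqcount held for writing, and the old tables are freed only after
 * synchronize_rcu(), so lockless readers never see a half-built table.
 */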
static void xfrm_hash_resize(struct work_struct *work)
{
struct net *net = container_of(work, struct net, xfrm.state_hash_work);
struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
unsigned long nsize, osize;
unsigned int nhashmask, ohashmask;
int i;
nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
ndst = xfrm_hash_alloc(nsize);
if (!ndst)
return;
nsrc = xfrm_hash_alloc(nsize);
if (!nsrc) {
xfrm_hash_free(ndst, nsize);
return;
}
nspi = xfrm_hash_alloc(nsize);
if (!nspi) {
xfrm_hash_free(ndst, nsize);
xfrm_hash_free(nsrc, nsize);
return;
}
nseq = xfrm_hash_alloc(nsize);
if (!nseq) {
xfrm_hash_free(ndst, nsize);
xfrm_hash_free(nsrc, nsize);
xfrm_hash_free(nspi, nsize);
return;
}
spin_lock_bh(&net->xfrm.xfrm_state_lock);
write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
for (i = net->xfrm.state_hmask; i >= 0; i--)
xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);
osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
ohashmask = net->xfrm.state_hmask;
rcu_assign_pointer(net->xfrm.state_bydst, ndst);
rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
rcu_assign_pointer(net->xfrm.state_byspi, nspi);
rcu_assign_pointer(net->xfrm.state_byseq, nseq);
net->xfrm.state_hmask = nhashmask;
write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
synchronize_rcu();
xfrm_hash_free(odst, osize);
xfrm_hash_free(osrc, osize);
xfrm_hash_free(ospi, osize);
xfrm_hash_free(oseq, osize);
}
static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
static DEFINE_SPINLOCK(xfrm_state_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
int err = 0;
if (!afinfo)
return -EAFNOSUPPORT;
#define X(afi, T, name) do { \
WARN_ON((afi)->type_ ## name); \
(afi)->type_ ## name = (T); \
} while (0)
switch (type->proto) {
case IPPROTO_COMP:
X(afinfo, type, comp);
break;
case IPPROTO_AH:
X(afinfo, type, ah);
break;
case IPPROTO_ESP:
X(afinfo, type, esp);
break;
case IPPROTO_IPIP:
X(afinfo, type, ipip);
break;
case IPPROTO_DSTOPTS:
X(afinfo, type, dstopts);
break;
case IPPROTO_ROUTING:
X(afinfo, type, routing);
break;
case IPPROTO_IPV6:
X(afinfo, type, ipip6);
break;
default:
WARN_ON(1);
err = -EPROTONOSUPPORT;
break;
}
#undef X
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_register_type);
void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return;
#define X(afi, T, name) do { \
WARN_ON((afi)->type_ ## name != (T)); \
(afi)->type_ ## name = NULL; \
} while (0)
switch (type->proto) {
case IPPROTO_COMP:
X(afinfo, type, comp);
break;
case IPPROTO_AH:
X(afinfo, type, ah);
break;
case IPPROTO_ESP:
X(afinfo, type, esp);
break;
case IPPROTO_IPIP:
X(afinfo, type, ipip);
break;
case IPPROTO_DSTOPTS:
X(afinfo, type, dstopts);
break;
case IPPROTO_ROUTING:
X(afinfo, type, routing);
break;
case IPPROTO_IPV6:
X(afinfo, type, ipip6);
break;
default:
WARN_ON(1);
break;
}
#undef X
rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type);
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
const struct xfrm_type *type = NULL;
struct xfrm_state_afinfo *afinfo;
int modload_attempted = 0;
retry:
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return NULL;
switch (proto) {
case IPPROTO_COMP:
type = afinfo->type_comp;
break;
case IPPROTO_AH:
type = afinfo->type_ah;
break;
case IPPROTO_ESP:
type = afinfo->type_esp;
break;
case IPPROTO_IPIP:
type = afinfo->type_ipip;
break;
case IPPROTO_DSTOPTS:
type = afinfo->type_dstopts;
break;
case IPPROTO_ROUTING:
type = afinfo->type_routing;
break;
case IPPROTO_IPV6:
type = afinfo->type_ipip6;
break;
default:
break;
}
if (unlikely(type && !try_module_get(type->owner)))
type = NULL;
rcu_read_unlock();
if (!type && !modload_attempted) {
request_module("xfrm-type-%d-%d", family, proto);
modload_attempted = 1;
goto retry;
}
return type;
}
static void xfrm_put_type(const struct xfrm_type *type)
{
module_put(type->owner);
}
int xfrm_register_type_offload(const struct xfrm_type_offload *type,
unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
int err = 0;
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
switch (type->proto) {
case IPPROTO_ESP:
WARN_ON(afinfo->type_offload_esp);
afinfo->type_offload_esp = type;
break;
default:
WARN_ON(1);
err = -EPROTONOSUPPORT;
break;
}
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_register_type_offload);
void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return;
switch (type->proto) {
case IPPROTO_ESP:
WARN_ON(afinfo->type_offload_esp != type);
afinfo->type_offload_esp = NULL;
break;
default:
WARN_ON(1);
break;
}
rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);
static const struct xfrm_type_offload *
xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
{
const struct xfrm_type_offload *type = NULL;
struct xfrm_state_afinfo *afinfo;
retry:
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return NULL;
switch (proto) {
case IPPROTO_ESP:
type = afinfo->type_offload_esp;
break;
default:
break;
}
if ((type && !try_module_get(type->owner)))
type = NULL;
rcu_read_unlock();
if (!type && try_load) {
request_module("xfrm-offload-%d-%d", family, proto);
try_load = false;
goto retry;
}
return type;
}
static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
{
module_put(type->owner);
}
static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
[XFRM_MODE_BEET] = {
.encap = XFRM_MODE_BEET,
.flags = XFRM_MODE_FLAG_TUNNEL,
.family = AF_INET,
},
[XFRM_MODE_TRANSPORT] = {
.encap = XFRM_MODE_TRANSPORT,
.family = AF_INET,
},
[XFRM_MODE_TUNNEL] = {
.encap = XFRM_MODE_TUNNEL,
.flags = XFRM_MODE_FLAG_TUNNEL,
.family = AF_INET,
},
};
static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
[XFRM_MODE_BEET] = {
.encap = XFRM_MODE_BEET,
.flags = XFRM_MODE_FLAG_TUNNEL,
.family = AF_INET6,
},
[XFRM_MODE_ROUTEOPTIMIZATION] = {
.encap = XFRM_MODE_ROUTEOPTIMIZATION,
.family = AF_INET6,
},
[XFRM_MODE_TRANSPORT] = {
.encap = XFRM_MODE_TRANSPORT,
.family = AF_INET6,
},
[XFRM_MODE_TUNNEL] = {
.encap = XFRM_MODE_TUNNEL,
.flags = XFRM_MODE_FLAG_TUNNEL,
.family = AF_INET6,
},
};
static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
const struct xfrm_mode *mode;
if (unlikely(encap >= XFRM_MODE_MAX))
return NULL;
switch (family) {
case AF_INET:
mode = &xfrm4_mode_map[encap];
if (mode->family == family)
return mode;
break;
case AF_INET6:
mode = &xfrm6_mode_map[encap];
if (mode->family == family)
return mode;
break;
default:
break;
}
return NULL;
}
void xfrm_state_free(struct xfrm_state *x)
{
kmem_cache_free(xfrm_state_cache, x);
}
EXPORT_SYMBOL(xfrm_state_free);
static void ___xfrm_state_destroy(struct xfrm_state *x)
{
hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
kfree(x->aead);
kfree(x->aalg);
kfree(x->ealg);
kfree(x->calg);
kfree(x->encap);
kfree(x->coaddr);
kfree(x->replay_esn);
kfree(x->preplay_esn);
if (x->type_offload)
xfrm_put_type_offload(x->type_offload);
if (x->type) {
x->type->destructor(x);
xfrm_put_type(x->type);
}
if (x->xfrag.page)
put_page(x->xfrag.page);
xfrm_dev_state_free(x);
security_xfrm_state_free(x);
xfrm_state_free(x);
}
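/* Deferred destruction: states queued on xfrm_state_gc_list by
 * __xfrm_state_destroy() are torn down here, after synchronize_rcu()
 * guarantees that no RCU reader still holds a pointer found in the hash
 * tables.
 */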
static void xfrm_state_gc_task(struct work_struct *work)
{
struct xfrm_state *x;
struct hlist_node *tmp;
struct hlist_head gc_list;
spin_lock_bh(&xfrm_state_gc_lock);
hlist_move_list(&xfrm_state_gc_list, &gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
synchronize_rcu();
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
___xfrm_state_destroy(x);
}
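/* Per-state lifetime timer.  Soft limits only mark the state as dying and
 * notify the key manager (km_state_expired() with hard == 0); hard limits
 * delete the state.  While any deadline remains outstanding the hrtimer is
 * re-armed for the nearest one.
 */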
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
enum hrtimer_restart ret = HRTIMER_NORESTART;
time64_t now = ktime_get_real_seconds();
time64_t next = TIME64_MAX;
int warn = 0;
int err = 0;
spin_lock(&x->lock);
xfrm_dev_state_update_curlft(x);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
if (x->km.state == XFRM_STATE_EXPIRED)
goto expired;
if (x->lft.hard_add_expires_seconds) {
time64_t tmo = x->lft.hard_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0) {
if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* enter hard expire without soft expire first?!
				 * setting a new date could trigger this.
				 * workaround: fix up x->curlft.add_time as below:
				 */
x->curlft.add_time = now - x->saved_tmo - 1;
tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
} else
goto expired;
}
if (tmo < next)
next = tmo;
}
if (x->lft.hard_use_expires_seconds) {
time64_t tmo = x->lft.hard_use_expires_seconds +
(READ_ONCE(x->curlft.use_time) ? : now) - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
next = tmo;
}
if (x->km.dying)
goto resched;
if (x->lft.soft_add_expires_seconds) {
time64_t tmo = x->lft.soft_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0) {
warn = 1;
x->xflags &= ~XFRM_SOFT_EXPIRE;
} else if (tmo < next) {
next = tmo;
x->xflags |= XFRM_SOFT_EXPIRE;
x->saved_tmo = tmo;
}
}
if (x->lft.soft_use_expires_seconds) {
time64_t tmo = x->lft.soft_use_expires_seconds +
(READ_ONCE(x->curlft.use_time) ? : now) - now;
if (tmo <= 0)
warn = 1;
else if (tmo < next)
next = tmo;
}
x->km.dying = warn;
if (warn)
km_state_expired(x, 0, 0);
resched:
if (next != TIME64_MAX) {
hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
ret = HRTIMER_RESTART;
}
goto out;
expired:
if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
x->km.state = XFRM_STATE_EXPIRED;
err = __xfrm_state_delete(x);
if (!err)
km_state_expired(x, 1, 0);
xfrm_audit_state_delete(x, err ? 0 : 1, true);
out:
spin_unlock(&x->lock);
return ret;
}
static void xfrm_replay_timer_handler(struct timer_list *t);
struct xfrm_state *xfrm_state_alloc(struct net *net)
{
struct xfrm_state *x;
x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);
if (x) {
write_pnet(&x->xs_net, net);
refcount_set(&x->refcnt, 1);
atomic_set(&x->tunnel_users, 0);
INIT_LIST_HEAD(&x->km.all);
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
INIT_HLIST_NODE(&x->byseq);
hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
x->mtimer.function = xfrm_timer_handler;
timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
x->curlft.add_time = ktime_get_real_seconds();
x->lft.soft_byte_limit = XFRM_INF;
x->lft.soft_packet_limit = XFRM_INF;
x->lft.hard_byte_limit = XFRM_INF;
x->lft.hard_packet_limit = XFRM_INF;
x->replay_maxage = 0;
x->replay_maxdiff = 0;
spin_lock_init(&x->lock);
}
return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
WARN_ON(x->km.state != XFRM_STATE_DEAD);
if (sync) {
synchronize_rcu();
___xfrm_state_destroy(x);
} else {
spin_lock_bh(&xfrm_state_gc_lock);
hlist_add_head(&x->gclist, &xfrm_state_gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
schedule_work(&xfrm_state_gc_work);
}
}
EXPORT_SYMBOL(__xfrm_state_destroy);
int __xfrm_state_delete(struct xfrm_state *x)
{
struct net *net = xs_net(x);
int err = -ESRCH;
if (x->km.state != XFRM_STATE_DEAD) {
x->km.state = XFRM_STATE_DEAD;
spin_lock(&net->xfrm.xfrm_state_lock);
list_del(&x->km.all);
hlist_del_rcu(&x->bydst);
hlist_del_rcu(&x->bysrc);
if (x->km.seq)
hlist_del_rcu(&x->byseq);
if (x->id.spi)
hlist_del_rcu(&x->byspi);
net->xfrm.state_num--;
spin_unlock(&net->xfrm.xfrm_state_lock);
if (x->encap_sk)
sock_put(rcu_dereference_raw(x->encap_sk));
xfrm_dev_state_delete(x);
/* All xfrm_state objects are created by xfrm_state_alloc.
* The xfrm_state_alloc call gives a reference, and that
* is what we are dropping here.
*/
xfrm_state_put(x);
err = 0;
}
return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
int xfrm_state_delete(struct xfrm_state *x)
{
int err;
spin_lock_bh(&x->lock);
err = __xfrm_state_delete(x);
spin_unlock_bh(&x->lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
int i, err = 0;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct xfrm_state *x;
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
if (xfrm_id_proto_match(x->id.proto, proto) &&
(err = security_xfrm_state_delete(x)) != 0) {
xfrm_audit_state_delete(x, 0, task_valid);
return err;
}
}
}
return err;
}
static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
int i, err = 0;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct xfrm_state *x;
struct xfrm_dev_offload *xso;
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
xso = &x->xso;
if (xso->dev == dev &&
(err = security_xfrm_state_delete(x)) != 0) {
xfrm_audit_state_delete(x, 0, task_valid);
return err;
}
}
}
return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
return 0;
}
static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
return 0;
}
#endif
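/* Flush all states matching the given proto.  Deleting a state means
 * dropping xfrm_state_lock, so the bucket walk restarts from the head after
 * every deletion; cnt tracks whether anything was removed, so a flush that
 * deleted at least one state still returns 0.
 */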
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
int i, err = 0, cnt = 0;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_state_flush_secctx_check(net, proto, task_valid);
if (err)
goto out;
err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct xfrm_state *x;
restart:
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
if (!xfrm_state_kern(x) &&
xfrm_id_proto_match(x->id.proto, proto)) {
xfrm_state_hold(x);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
if (sync)
xfrm_state_put_sync(x);
else
xfrm_state_put(x);
if (!err)
cnt++;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
goto restart;
}
}
}
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
if (cnt)
err = 0;
return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
int i, err = 0, cnt = 0;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
if (err)
goto out;
err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct xfrm_state *x;
struct xfrm_dev_offload *xso;
restart:
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
xso = &x->xso;
if (!xfrm_state_kern(x) && xso->dev == dev) {
xfrm_state_hold(x);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
xfrm_state_put(x);
if (!err)
cnt++;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
goto restart;
}
}
}
if (cnt)
err = 0;
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
spin_lock_bh(&net->xfrm.xfrm_state_lock);
si->sadcnt = net->xfrm.state_num;
si->sadhcnt = net->xfrm.state_hmask + 1;
si->sadhmcnt = xfrm_state_hashmax;
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
static void
__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi4 *fl4 = &fl->u.ip4;
sel->daddr.a4 = fl4->daddr;
sel->saddr.a4 = fl4->saddr;
sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
sel->dport_mask = htons(0xffff);
sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
sel->sport_mask = htons(0xffff);
sel->family = AF_INET;
sel->prefixlen_d = 32;
sel->prefixlen_s = 32;
sel->proto = fl4->flowi4_proto;
sel->ifindex = fl4->flowi4_oif;
}
static void
__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi6 *fl6 = &fl->u.ip6;
/* Initialize temporary selector matching only to current session. */
*(struct in6_addr *)&sel->daddr = fl6->daddr;
*(struct in6_addr *)&sel->saddr = fl6->saddr;
sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
sel->dport_mask = htons(0xffff);
sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
sel->sport_mask = htons(0xffff);
sel->family = AF_INET6;
sel->prefixlen_d = 128;
sel->prefixlen_s = 128;
sel->proto = fl6->flowi6_proto;
sel->ifindex = fl6->flowi6_oif;
}
static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
const struct xfrm_tmpl *tmpl,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family)
{
switch (family) {
case AF_INET:
__xfrm4_init_tempsel(&x->sel, fl);
break;
case AF_INET6:
__xfrm6_init_tempsel(&x->sel, fl);
break;
}
x->id = tmpl->id;
switch (tmpl->encap_family) {
case AF_INET:
if (x->id.daddr.a4 == 0)
x->id.daddr.a4 = daddr->a4;
x->props.saddr = tmpl->saddr;
if (x->props.saddr.a4 == 0)
x->props.saddr.a4 = saddr->a4;
break;
case AF_INET6:
if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
break;
}
x->props.mode = tmpl->mode;
x->props.reqid = tmpl->reqid;
x->props.family = tmpl->encap_family;
}
static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
unsigned short family,
struct xfrm_dev_offload *xdo)
{
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
#ifdef CONFIG_XFRM_OFFLOAD
if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are at the head of the list, so
				 * there is no need to iterate further.
				 */
				break;
			/* Packet offload: both policy and SA should
			 * have the same device.
			 */
if (xdo->dev != x->xso.dev)
continue;
} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
/* Skip HW policy for SW lookups */
continue;
#endif
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family))
continue;
if ((mark & x->mark.m) != x->mark.v)
continue;
if (!xfrm_state_hold_rcu(x))
continue;
return x;
}
return NULL;
}
static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
unsigned short family)
{
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family))
continue;
if ((mark & x->mark.m) != x->mark.v)
continue;
if (!xfrm_state_hold_rcu(x))
continue;
return x;
}
return NULL;
}
static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
struct xfrm_state *x;
hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
if (x->props.family != family ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family) ||
!xfrm_addr_equal(&x->props.saddr, saddr, family))
continue;
if ((mark & x->mark.m) != x->mark.v)
continue;
if (!xfrm_state_hold_rcu(x))
continue;
return x;
}
return NULL;
}
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
struct net *net = xs_net(x);
u32 mark = x->mark.v & x->mark.m;
if (use_spi)
return __xfrm_state_lookup(net, mark, &x->id.daddr,
x->id.spi, x->id.proto, family);
else
return __xfrm_state_lookup_byaddr(net, mark,
&x->id.daddr,
&x->props.saddr,
x->id.proto, family);
}
static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
if (have_hash_collision &&
(net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
net->xfrm.state_num > net->xfrm.state_hmask)
schedule_work(&net->xfrm.state_hash_work);
}
static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
const struct flowi *fl, unsigned short family,
struct xfrm_state **best, int *acq_in_progress,
int *error)
{
/* Resolution logic:
* 1. There is a valid state with matching selector. Done.
* 2. Valid state with inappropriate selector. Skip.
*
* Entering area of "sysdeps".
*
* 3. If state is not valid, selector is temporary, it selects
* only session which triggered previous resolution. Key
* manager will do something to install a state with proper
* selector.
*/
if (x->km.state == XFRM_STATE_VALID) {
if ((x->sel.family &&
(x->sel.family != family ||
!xfrm_selector_match(&x->sel, fl, family))) ||
!security_xfrm_state_pol_flow_match(x, pol,
&fl->u.__fl_common))
return;
if (!*best ||
(*best)->km.dying > x->km.dying ||
((*best)->km.dying == x->km.dying &&
(*best)->curlft.add_time < x->curlft.add_time))
*best = x;
} else if (x->km.state == XFRM_STATE_ACQ) {
*acq_in_progress = 1;
} else if (x->km.state == XFRM_STATE_ERROR ||
x->km.state == XFRM_STATE_EXPIRED) {
if ((!x->sel.family ||
(x->sel.family == family &&
xfrm_selector_match(&x->sel, fl, family))) &&
security_xfrm_state_pol_flow_match(x, pol,
&fl->u.__fl_common))
*error = -ESRCH;
}
}
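/* Output-path SA resolution for one template of a policy, roughly:
 *   1. search the (daddr, saddr, reqid) hash for a usable state;
 *   2. retry with a wildcard source address;
 *   3. otherwise allocate a temporary XFRM_STATE_ACQ entry and ask the key
 *      manager (km_query()) to negotiate a real SA.
 * On failure *err is set, typically to -EAGAIN (acquire already in progress
 * or a hash resize raced), -EEXIST (the template's SPI is already in use),
 * -ESRCH or -ENOMEM.
 */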
struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
const struct flowi *fl, struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
unsigned short family, u32 if_id)
{
static xfrm_address_t saddr_wildcard = { };
struct net *net = xp_net(pol);
unsigned int h, h_wildcard;
struct xfrm_state *x, *x0, *to_put;
int acquire_in_progress = 0;
int error = 0;
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
unsigned short encap_family = tmpl->encap_family;
unsigned int sequence;
struct km_event c;
to_put = NULL;
sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
rcu_read_lock();
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are at the head of the list, so
				 * there is no need to iterate further.
				 */
				break;
			/* Packet offload: both policy and SA should
			 * have the same device.
			 */
if (pol->xdo.dev != x->xso.dev)
continue;
} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
/* Skip HW policy for SW lookups */
continue;
#endif
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
x->if_id == if_id &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, family,
&best, &acquire_in_progress, &error);
}
if (best || acquire_in_progress)
goto found;
h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are at the head of the list, so
				 * there is no need to iterate further.
				 */
				break;
			/* Packet offload: both policy and SA should
			 * have the same device.
			 */
if (pol->xdo.dev != x->xso.dev)
continue;
} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
/* Skip HW policy for SW lookups */
continue;
#endif
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
x->if_id == if_id &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, family,
&best, &acquire_in_progress, &error);
}
found:
x = best;
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
(x0 = __xfrm_state_lookup_all(net, mark, daddr,
tmpl->id.spi, tmpl->id.proto,
encap_family,
&pol->xdo)) != NULL) {
to_put = x0;
error = -EEXIST;
goto out;
}
c.net = net;
/* If the KMs have no listeners (yet...), avoid allocating an SA
* for each and every packet - garbage collection might not
* handle the flood.
*/
if (!km_is_alive(&c)) {
error = -ESRCH;
goto out;
}
x = xfrm_state_alloc(net);
if (x == NULL) {
error = -ENOMEM;
goto out;
}
/* Initialize temporary state matching only
* to current session. */
xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
memcpy(&x->mark, &pol->mark, sizeof(x->mark));
x->if_id = if_id;
error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
if (error) {
x->km.state = XFRM_STATE_DEAD;
to_put = x;
x = NULL;
goto out;
}
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
struct xfrm_dev_offload *xdo = &pol->xdo;
struct xfrm_dev_offload *xso = &x->xso;
xso->type = XFRM_DEV_OFFLOAD_PACKET;
xso->dir = xdo->dir;
xso->dev = xdo->dev;
xso->real_dev = xdo->real_dev;
xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
netdev_tracker_alloc(xso->dev, &xso->dev_tracker,
GFP_ATOMIC);
error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
if (error) {
xso->dir = 0;
netdev_put(xso->dev, &xso->dev_tracker);
xso->dev = NULL;
xso->real_dev = NULL;
xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
x->km.state = XFRM_STATE_DEAD;
to_put = x;
x = NULL;
goto out;
}
}
#endif
if (km_query(x, tmpl, pol) == 0) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x->km.state = XFRM_STATE_ACQ;
list_add(&x->km.all, &net->xfrm.state_all);
XFRM_STATE_INSERT(bydst, &x->bydst,
net->xfrm.state_bydst + h,
x->xso.type);
h = xfrm_src_hash(net, daddr, saddr, encap_family);
XFRM_STATE_INSERT(bysrc, &x->bysrc,
net->xfrm.state_bysrc + h,
x->xso.type);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
XFRM_STATE_INSERT(byspi, &x->byspi,
net->xfrm.state_byspi + h,
x->xso.type);
}
if (x->km.seq) {
h = xfrm_seq_hash(net, x->km.seq);
XFRM_STATE_INSERT(byseq, &x->byseq,
net->xfrm.state_byseq + h,
x->xso.type);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
hrtimer_start(&x->mtimer,
ktime_set(net->xfrm.sysctl_acq_expires, 0),
HRTIMER_MODE_REL_SOFT);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
} else {
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrm_dev_offload *xso = &x->xso;
if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
xfrm_dev_state_delete(x);
xfrm_dev_state_free(x);
}
#endif
x->km.state = XFRM_STATE_DEAD;
to_put = x;
x = NULL;
error = -ESRCH;
}
}
out:
if (x) {
if (!xfrm_state_hold_rcu(x)) {
*err = -EAGAIN;
x = NULL;
}
} else {
*err = acquire_in_progress ? -EAGAIN : error;
}
rcu_read_unlock();
if (to_put)
xfrm_state_put(to_put);
if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
*err = -EAGAIN;
if (x) {
xfrm_state_put(x);
x = NULL;
}
}
return x;
}
struct xfrm_state *
xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
xfrm_address_t *daddr, xfrm_address_t *saddr,
unsigned short family, u8 mode, u8 proto, u32 reqid)
{
unsigned int h;
struct xfrm_state *rx = NULL, *x = NULL;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
(mark & x->mark.m) == x->mark.v &&
x->if_id == if_id &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, family) &&
mode == x->props.mode &&
proto == x->id.proto &&
x->km.state == XFRM_STATE_VALID) {
rx = x;
break;
}
}
if (rx)
xfrm_state_hold(rx);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family)
{
struct xfrm_state *x;
struct xfrm_state_walk *w;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
list_for_each_entry(w, &net->xfrm.state_all, all) {
x = container_of(w, struct xfrm_state, km);
if (x->props.family != family ||
x->id.spi != spi)
continue;
xfrm_state_hold(x);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return NULL;
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);
static void __xfrm_state_insert(struct xfrm_state *x)
{
struct net *net = xs_net(x);
unsigned int h;
list_add(&x->km.all, &net->xfrm.state_all);
h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family);
XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
x->xso.type);
h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
x->xso.type);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
x->props.family);
XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
x->xso.type);
}
if (x->km.seq) {
h = xfrm_seq_hash(net, x->km.seq);
XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h,
x->xso.type);
}
hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
if (x->replay_maxage)
mod_timer(&x->rtimer, jiffies + x->replay_maxage);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
}
/* net->xfrm.xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
struct net *net = xs_net(xnew);
unsigned short family = xnew->props.family;
u32 reqid = xnew->props.reqid;
struct xfrm_state *x;
unsigned int h;
u32 mark = xnew->mark.v & xnew->mark.m;
u32 if_id = xnew->if_id;
h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
x->if_id == if_id &&
(mark & x->mark.m) == x->mark.v &&
xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
x->genid++;
}
}
void xfrm_state_insert(struct xfrm_state *x)
{
struct net *net = xs_net(x);
spin_lock_bh(&net->xfrm.xfrm_state_lock);
__xfrm_state_bump_genids(x);
__xfrm_state_insert(x);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
/* net->xfrm.xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(struct net *net,
const struct xfrm_mark *m,
unsigned short family, u8 mode,
u32 reqid, u32 if_id, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
struct xfrm_state *x;
u32 mark = m->v & m->m;
hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.reqid != reqid ||
x->props.mode != mode ||
x->props.family != family ||
x->km.state != XFRM_STATE_ACQ ||
x->id.spi != 0 ||
x->id.proto != proto ||
(mark & x->mark.m) != x->mark.v ||
!xfrm_addr_equal(&x->id.daddr, daddr, family) ||
!xfrm_addr_equal(&x->props.saddr, saddr, family))
continue;
xfrm_state_hold(x);
return x;
}
if (!create)
return NULL;
x = xfrm_state_alloc(net);
if (likely(x)) {
switch (family) {
case AF_INET:
x->sel.daddr.a4 = daddr->a4;
x->sel.saddr.a4 = saddr->a4;
x->sel.prefixlen_d = 32;
x->sel.prefixlen_s = 32;
x->props.saddr.a4 = saddr->a4;
x->id.daddr.a4 = daddr->a4;
break;
case AF_INET6:
x->sel.daddr.in6 = daddr->in6;
x->sel.saddr.in6 = saddr->in6;
x->sel.prefixlen_d = 128;
x->sel.prefixlen_s = 128;
x->props.saddr.in6 = saddr->in6;
x->id.daddr.in6 = daddr->in6;
break;
}
x->km.state = XFRM_STATE_ACQ;
x->id.proto = proto;
x->props.family = family;
x->props.mode = mode;
x->props.reqid = reqid;
x->if_id = if_id;
x->mark.v = m->v;
x->mark.m = m->m;
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
xfrm_state_hold(x);
hrtimer_start(&x->mtimer,
ktime_set(net->xfrm.sysctl_acq_expires, 0),
HRTIMER_MODE_REL_SOFT);
list_add(&x->km.all, &net->xfrm.state_all);
XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
x->xso.type);
h = xfrm_src_hash(net, daddr, saddr, family);
XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
x->xso.type);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
}
return x;
}
static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
struct net *net = xs_net(x);
struct xfrm_state *x1, *to_put;
int family;
int err;
u32 mark = x->mark.v & x->mark.m;
int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
family = x->props.family;
to_put = NULL;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x1 = __xfrm_state_locate(x, use_spi, family);
if (x1) {
to_put = x1;
x1 = NULL;
err = -EEXIST;
goto out;
}
if (use_spi && x->km.seq) {
x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
if (x1 && ((x1->id.proto != x->id.proto) ||
!xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
to_put = x1;
x1 = NULL;
}
}
if (use_spi && !x1)
x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
x->props.reqid, x->if_id, x->id.proto,
&x->id.daddr, &x->props.saddr, 0);
__xfrm_state_bump_genids(x);
__xfrm_state_insert(x);
err = 0;
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
if (x1) {
xfrm_state_delete(x1);
xfrm_state_put(x1);
}
if (to_put)
xfrm_state_put(to_put);
return err;
}
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
{
struct xfrm_user_sec_ctx *uctx;
int size = sizeof(*uctx) + security->ctx_len;
int err;
uctx = kmalloc(size, GFP_KERNEL);
if (!uctx)
return -ENOMEM;
uctx->exttype = XFRMA_SEC_CTX;
uctx->len = size;
uctx->ctx_doi = security->ctx_doi;
uctx->ctx_alg = security->ctx_alg;
uctx->ctx_len = security->ctx_len;
memcpy(uctx + 1, security->ctx_str, security->ctx_len);
err = security_xfrm_state_alloc(x, uctx);
kfree(uctx);
if (err)
return err;
return 0;
}
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
struct xfrm_encap_tmpl *encap)
{
struct net *net = xs_net(orig);
struct xfrm_state *x = xfrm_state_alloc(net);
if (!x)
goto out;
memcpy(&x->id, &orig->id, sizeof(x->id));
memcpy(&x->sel, &orig->sel, sizeof(x->sel));
memcpy(&x->lft, &orig->lft, sizeof(x->lft));
x->props.mode = orig->props.mode;
x->props.replay_window = orig->props.replay_window;
x->props.reqid = orig->props.reqid;
x->props.family = orig->props.family;
x->props.saddr = orig->props.saddr;
if (orig->aalg) {
x->aalg = xfrm_algo_auth_clone(orig->aalg);
if (!x->aalg)
goto error;
}
x->props.aalgo = orig->props.aalgo;
if (orig->aead) {
x->aead = xfrm_algo_aead_clone(orig->aead);
x->geniv = orig->geniv;
if (!x->aead)
goto error;
}
if (orig->ealg) {
x->ealg = xfrm_algo_clone(orig->ealg);
if (!x->ealg)
goto error;
}
x->props.ealgo = orig->props.ealgo;
if (orig->calg) {
x->calg = xfrm_algo_clone(orig->calg);
if (!x->calg)
goto error;
}
x->props.calgo = orig->props.calgo;
if (encap || orig->encap) {
if (encap)
x->encap = kmemdup(encap, sizeof(*x->encap),
GFP_KERNEL);
else
x->encap = kmemdup(orig->encap, sizeof(*x->encap),
GFP_KERNEL);
if (!x->encap)
goto error;
}
if (orig->security)
if (clone_security(x, orig->security))
goto error;
if (orig->coaddr) {
x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
GFP_KERNEL);
if (!x->coaddr)
goto error;
}
if (orig->replay_esn) {
if (xfrm_replay_clone(x, orig))
goto error;
}
memcpy(&x->mark, &orig->mark, sizeof(x->mark));
memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
x->props.flags = orig->props.flags;
x->props.extra_flags = orig->props.extra_flags;
x->if_id = orig->if_id;
x->tfcpad = orig->tfcpad;
x->replay_maxdiff = orig->replay_maxdiff;
x->replay_maxage = orig->replay_maxage;
memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
x->km.state = orig->km.state;
x->km.seq = orig->km.seq;
x->replay = orig->replay;
x->preplay = orig->preplay;
x->mapping_maxage = orig->mapping_maxage;
x->lastused = orig->lastused;
x->new_mapping = 0;
x->new_mapping_sport = 0;
return x;
error:
xfrm_state_put(x);
out:
return NULL;
}
struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
u32 if_id)
{
unsigned int h;
struct xfrm_state *x = NULL;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
if (m->reqid) {
h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
m->reqid, m->old_family);
hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
if (m->reqid && x->props.reqid != m->reqid)
continue;
if (if_id != 0 && x->if_id != if_id)
continue;
if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
m->old_family) ||
!xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
m->old_family))
continue;
xfrm_state_hold(x);
break;
}
} else {
h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
m->old_family);
hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
if (if_id != 0 && x->if_id != if_id)
continue;
if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
m->old_family) ||
!xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
m->old_family))
continue;
xfrm_state_hold(x);
break;
}
}
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m,
struct xfrm_encap_tmpl *encap)
{
struct xfrm_state *xc;
xc = xfrm_state_clone(x, encap);
if (!xc)
return NULL;
xc->props.family = m->new_family;
if (xfrm_init_state(xc) < 0)
goto error;
memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
/* add state */
if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* extra care is needed when the destination address of the
		 * state is to be updated, as it is part of the lookup triplet
		 */
xfrm_state_insert(xc);
} else {
if (xfrm_state_add(xc) < 0)
goto error;
}
return xc;
error:
xfrm_state_put(xc);
return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
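/*
 * Note on xfrm_state_migrate() above: the original state is deep-copied by
 * xfrm_state_clone(), re-initialised for the new family and endpoints, and
 * then either inserted directly (destination address unchanged, so the
 * lookup triplet is preserved) or added through xfrm_state_add() when the
 * destination address changes.
 */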
#endif
int xfrm_state_update(struct xfrm_state *x)
{
struct xfrm_state *x1, *to_put;
int err;
int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
struct net *net = xs_net(x);
to_put = NULL;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x1 = __xfrm_state_locate(x, use_spi, x->props.family);
err = -ESRCH;
if (!x1)
goto out;
if (xfrm_state_kern(x1)) {
to_put = x1;
err = -EEXIST;
goto out;
}
if (x1->km.state == XFRM_STATE_ACQ) {
__xfrm_state_insert(x);
x = NULL;
}
err = 0;
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
if (to_put)
xfrm_state_put(to_put);
if (err)
return err;
if (!x) {
xfrm_state_delete(x1);
xfrm_state_put(x1);
return 0;
}
err = -EINVAL;
spin_lock_bh(&x1->lock);
if (likely(x1->km.state == XFRM_STATE_VALID)) {
if (x->encap && x1->encap &&
x->encap->encap_type == x1->encap->encap_type)
memcpy(x1->encap, x->encap, sizeof(*x1->encap));
else if (x->encap || x1->encap)
goto fail;
if (x->coaddr && x1->coaddr) {
memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
}
if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
x1->km.dying = 0;
hrtimer_start(&x1->mtimer, ktime_set(1, 0),
HRTIMER_MODE_REL_SOFT);
if (READ_ONCE(x1->curlft.use_time))
xfrm_state_check_expire(x1);
if (x->props.smark.m || x->props.smark.v || x->if_id) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
if (x->props.smark.m || x->props.smark.v)
x1->props.smark = x->props.smark;
if (x->if_id)
x1->if_id = x->if_id;
__xfrm_state_bump_genids(x1);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
err = 0;
x->km.state = XFRM_STATE_DEAD;
__xfrm_state_put(x);
}
fail:
spin_unlock_bh(&x1->lock);
xfrm_state_put(x1);
return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
xfrm_dev_state_update_curlft(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
x->km.state = XFRM_STATE_EXPIRED;
hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
return -EINVAL;
}
if (!x->km.dying &&
(x->curlft.bytes >= x->lft.soft_byte_limit ||
x->curlft.packets >= x->lft.soft_packet_limit)) {
x->km.dying = 1;
km_state_expired(x, 0, 0);
}
return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
{
struct xfrm_state *x;
rcu_read_lock();
x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
rcu_read_unlock();
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
u32 if_id, u8 proto, const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
#if IS_ENABLED(CONFIG_IPV6)
/* distribution counting sort function for xfrm_state and xfrm_tmpl */
static void
__xfrm6_sort(void **dst, void **src, int n,
int (*cmp)(const void *p), int maxclass)
{
int count[XFRM_MAX_DEPTH] = { };
int class[XFRM_MAX_DEPTH];
int i;
for (i = 0; i < n; i++) {
int c = cmp(src[i]);
class[i] = c;
count[c]++;
}
for (i = 2; i < maxclass; i++)
count[i] += count[i - 1];
for (i = 0; i < n; i++) {
dst[count[class[i] - 1]++] = src[i];
src[i] = NULL;
}
}
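/*
 * Illustrative walk-through of __xfrm6_sort() above (values assumed):
 * with n = 4, maxclass = 4 and per-entry classes {1, 3, 1, 2}, the first
 * pass yields count[] = {0, 2, 1, 1}; the prefix-sum pass turns this into
 * {0, 2, 3, 4}; the final pass then places the two class-1 entries at
 * dst[0] and dst[1], the class-2 entry at dst[2] and the class-3 entry at
 * dst[3], preserving the original order within each class.
 */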
/* Rule for xfrm_state:
*
* rule 1: select IPsec transport except AH
* rule 2: select MIPv6 RO or inbound trigger
* rule 3: select IPsec transport AH
* rule 4: select IPsec tunnel
* rule 5: others
*/
static int __xfrm6_state_sort_cmp(const void *p)
{
const struct xfrm_state *v = p;
switch (v->props.mode) {
case XFRM_MODE_TRANSPORT:
if (v->id.proto != IPPROTO_AH)
return 1;
else
return 3;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_IN_TRIGGER:
return 2;
#endif
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
return 4;
}
return 5;
}
/* Rule for xfrm_tmpl:
*
* rule 1: select IPsec transport
* rule 2: select MIPv6 RO or inbound trigger
* rule 3: select IPsec tunnel
* rule 4: others
*/
static int __xfrm6_tmpl_sort_cmp(const void *p)
{
const struct xfrm_tmpl *v = p;
switch (v->mode) {
case XFRM_MODE_TRANSPORT:
return 1;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_IN_TRIGGER:
return 2;
#endif
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
return 3;
}
return 4;
}
#else
static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }
static inline void
__xfrm6_sort(void **dst, void **src, int n,
int (*cmp)(const void *p), int maxclass)
{
int i;
for (i = 0; i < n; i++)
dst[i] = src[i];
}
#endif /* CONFIG_IPV6 */
void
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
unsigned short family)
{
int i;
if (family == AF_INET6)
__xfrm6_sort((void **)dst, (void **)src, n,
__xfrm6_tmpl_sort_cmp, 5);
else
for (i = 0; i < n; i++)
dst[i] = src[i];
}
void
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
unsigned short family)
{
int i;
if (family == AF_INET6)
__xfrm6_sort((void **)dst, (void **)src, n,
__xfrm6_state_sort_cmp, 6);
else
for (i = 0; i < n; i++)
dst[i] = src[i];
}
#endif
/* Silly enough, but I'm too lazy to build a resolution list */
static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
unsigned int h = xfrm_seq_hash(net, seq);
struct xfrm_state *x;
hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
if (x->km.seq == seq &&
(mark & x->mark.m) == x->mark.v &&
x->km.state == XFRM_STATE_ACQ) {
xfrm_state_hold(x);
return x;
}
}
return NULL;
}
struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
struct xfrm_state *x;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x = __xfrm_find_acq_byseq(net, mark, seq);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
u32 res;
static atomic_t acqseq;
do {
res = atomic_inc_return(&acqseq);
} while (!res);
return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
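/*
 * Note on xfrm_get_acqseq() above: the do/while loop simply skips the value
 * 0 when the 32-bit counter wraps, because a zero km.seq is treated as "no
 * sequence number" elsewhere (see the x->km.seq check in xfrm_state_add()).
 * A key manager might, for example, stash the returned value in x->km.seq
 * before issuing an ACQUIRE (illustrative usage, hedged).
 */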
int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack)
{
switch (proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
break;
case IPPROTO_COMP:
		/* The IPCOMP SPI is only 16 bits. */
if (max >= 0x10000) {
NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535");
return -EINVAL;
}
break;
default:
NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP");
return -EINVAL;
}
if (min > max) {
NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max");
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(verify_spi_info);
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
struct netlink_ext_ack *extack)
{
struct net *net = xs_net(x);
unsigned int h;
struct xfrm_state *x0;
int err = -ENOENT;
__be32 minspi = htonl(low);
__be32 maxspi = htonl(high);
__be32 newspi = 0;
u32 mark = x->mark.v & x->mark.m;
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_DEAD) {
NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state");
goto unlock;
}
err = 0;
if (x->id.spi)
goto unlock;
err = -ENOENT;
if (minspi == maxspi) {
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
if (x0) {
NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
xfrm_state_put(x0);
goto unlock;
}
newspi = minspi;
} else {
u32 spi = 0;
for (h = 0; h < high-low+1; h++) {
spi = get_random_u32_inclusive(low, high);
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
if (x0 == NULL) {
newspi = htonl(spi);
break;
}
xfrm_state_put(x0);
}
}
if (newspi) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x->id.spi = newspi;
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
x->xso.type);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = 0;
} else {
NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
}
unlock:
spin_unlock_bh(&x->lock);
return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
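/*
 * Note on xfrm_alloc_spi() above: when a range is given, up to
 * (high - low + 1) random probes are made; each candidate SPI is checked
 * for collisions via xfrm_state_lookup() and the first unused value wins.
 * With low == high, the single requested SPI is either taken as-is or
 * rejected with "Requested SPI is already in use".
 */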
static bool __xfrm_state_filter_match(struct xfrm_state *x,
struct xfrm_address_filter *filter)
{
if (filter) {
if ((filter->family == AF_INET ||
filter->family == AF_INET6) &&
x->props.family != filter->family)
return false;
return addr_match(&x->props.saddr, &filter->saddr,
filter->splen) &&
addr_match(&x->id.daddr, &filter->daddr,
filter->dplen);
}
return true;
}
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*),
void *data)
{
struct xfrm_state *state;
struct xfrm_state_walk *x;
int err = 0;
if (walk->seq != 0 && list_empty(&walk->all))
return 0;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
if (list_empty(&walk->all))
x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
else
x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
list_for_each_entry_from(x, &net->xfrm.state_all, all) {
if (x->state == XFRM_STATE_DEAD)
continue;
state = container_of(x, struct xfrm_state, km);
if (!xfrm_id_proto_match(state->id.proto, walk->proto))
continue;
if (!__xfrm_state_filter_match(state, walk->filter))
continue;
err = func(state, walk->seq, data);
if (err) {
list_move_tail(&walk->all, &x->all);
goto out;
}
walk->seq++;
}
if (walk->seq == 0) {
err = -ENOENT;
goto out;
}
list_del_init(&walk->all);
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
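/*
 * Note on xfrm_state_walk() above: the walk is resumable. When the callback
 * returns non-zero, walk->all is linked just before the entry whose callback
 * failed, so a later call resumes at that entry; a completed walk detaches
 * walk->all again via list_del_init().
 */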
void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
struct xfrm_address_filter *filter)
{
INIT_LIST_HEAD(&walk->all);
walk->proto = proto;
walk->state = XFRM_STATE_DEAD;
walk->seq = 0;
walk->filter = filter;
}
EXPORT_SYMBOL(xfrm_state_walk_init);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
{
kfree(walk->filter);
if (list_empty(&walk->all))
return;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
list_del(&walk->all);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);
static void xfrm_replay_timer_handler(struct timer_list *t)
{
struct xfrm_state *x = from_timer(x, t, rtimer);
spin_lock(&x->lock);
if (x->km.state == XFRM_STATE_VALID) {
if (xfrm_aevent_is_on(xs_net(x)))
xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
else
x->xflags |= XFRM_TIME_DEFER;
}
spin_unlock(&x->lock);
}
static LIST_HEAD(xfrm_km_list);
void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct xfrm_mgr *km;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list)
if (km->notify_policy)
km->notify_policy(xp, dir, c);
rcu_read_unlock();
}
void km_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_mgr *km;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list)
if (km->notify)
km->notify(x, c);
rcu_read_unlock();
}
EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
{
struct km_event c;
c.data.hard = hard;
c.portid = portid;
c.event = XFRM_MSG_EXPIRE;
km_state_notify(x, &c);
}
EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
*/
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
int err = -EINVAL, acqret;
struct xfrm_mgr *km;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list) {
acqret = km->acquire(x, t, pol);
if (!acqret)
err = acqret;
}
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(km_query);
static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
int err = -EINVAL;
struct xfrm_mgr *km;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list) {
if (km->new_mapping)
err = km->new_mapping(x, ipaddr, sport);
if (!err)
break;
}
rcu_read_unlock();
return err;
}
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
int ret = 0;
if (x->mapping_maxage) {
if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage ||
x->new_mapping_sport != sport) {
x->new_mapping_sport = sport;
x->new_mapping = jiffies / HZ;
ret = __km_new_mapping(x, ipaddr, sport);
}
} else {
ret = __km_new_mapping(x, ipaddr, sport);
}
return ret;
}
EXPORT_SYMBOL(km_new_mapping);
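/*
 * Note on km_new_mapping() above: when x->mapping_maxage is set, NAT mapping
 * change notifications are rate-limited: a new notification is only sent if
 * the source port changed or at least mapping_maxage seconds (jiffies / HZ)
 * have passed since the last one; otherwise every mapping change is reported.
 */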
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
{
struct km_event c;
c.data.hard = hard;
c.portid = portid;
c.event = XFRM_MSG_POLEXPIRE;
km_policy_notify(pol, dir, &c);
}
EXPORT_SYMBOL(km_policy_expired);
#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_migrate,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap)
{
int err = -EINVAL;
int ret;
struct xfrm_mgr *km;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list) {
if (km->migrate) {
ret = km->migrate(sel, dir, type, m, num_migrate, k,
encap);
if (!ret)
err = ret;
}
}
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
int err = -EINVAL;
int ret;
struct xfrm_mgr *km;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list) {
if (km->report) {
ret = km->report(net, proto, sel, addr);
if (!ret)
err = ret;
}
}
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(km_report);
static bool km_is_alive(const struct km_event *c)
{
struct xfrm_mgr *km;
bool is_alive = false;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list) {
if (km->is_alive && km->is_alive(c)) {
is_alive = true;
break;
}
}
rcu_read_unlock();
return is_alive;
}
#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
static DEFINE_SPINLOCK(xfrm_translator_lock);
static struct xfrm_translator __rcu *xfrm_translator;
struct xfrm_translator *xfrm_get_translator(void)
{
struct xfrm_translator *xtr;
rcu_read_lock();
xtr = rcu_dereference(xfrm_translator);
if (unlikely(!xtr))
goto out;
if (!try_module_get(xtr->owner))
xtr = NULL;
out:
rcu_read_unlock();
return xtr;
}
EXPORT_SYMBOL_GPL(xfrm_get_translator);
void xfrm_put_translator(struct xfrm_translator *xtr)
{
module_put(xtr->owner);
}
EXPORT_SYMBOL_GPL(xfrm_put_translator);
int xfrm_register_translator(struct xfrm_translator *xtr)
{
int err = 0;
spin_lock_bh(&xfrm_translator_lock);
if (unlikely(xfrm_translator != NULL))
err = -EEXIST;
else
rcu_assign_pointer(xfrm_translator, xtr);
spin_unlock_bh(&xfrm_translator_lock);
return err;
}
EXPORT_SYMBOL_GPL(xfrm_register_translator);
int xfrm_unregister_translator(struct xfrm_translator *xtr)
{
int err = 0;
spin_lock_bh(&xfrm_translator_lock);
if (likely(xfrm_translator != NULL)) {
if (rcu_access_pointer(xfrm_translator) != xtr)
err = -EINVAL;
else
RCU_INIT_POINTER(xfrm_translator, NULL);
}
spin_unlock_bh(&xfrm_translator_lock);
synchronize_rcu();
return err;
}
EXPORT_SYMBOL_GPL(xfrm_unregister_translator);
#endif
int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
{
int err;
u8 *data;
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
if (sockptr_is_null(optval) && !optlen) {
xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
__sk_dst_reset(sk);
return 0;
}
if (optlen <= 0 || optlen > PAGE_SIZE)
return -EMSGSIZE;
data = memdup_sockptr(optval, optlen);
if (IS_ERR(data))
return PTR_ERR(data);
if (in_compat_syscall()) {
struct xfrm_translator *xtr = xfrm_get_translator();
if (!xtr) {
kfree(data);
return -EOPNOTSUPP;
}
err = xtr->xlate_user_policy_sockptr(&data, optlen);
xfrm_put_translator(xtr);
if (err) {
kfree(data);
return err;
}
}
err = -EINVAL;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list) {
pol = km->compile_policy(sk, optname, data,
optlen, &err);
if (err >= 0)
break;
}
rcu_read_unlock();
if (err >= 0) {
xfrm_sk_policy_insert(sk, err, pol);
xfrm_pol_put(pol);
__sk_dst_reset(sk);
err = 0;
}
kfree(data);
return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
static DEFINE_SPINLOCK(xfrm_km_lock);
void xfrm_register_km(struct xfrm_mgr *km)
{
spin_lock_bh(&xfrm_km_lock);
list_add_tail_rcu(&km->list, &xfrm_km_list);
spin_unlock_bh(&xfrm_km_lock);
}
EXPORT_SYMBOL(xfrm_register_km);
void xfrm_unregister_km(struct xfrm_mgr *km)
{
spin_lock_bh(&xfrm_km_lock);
list_del_rcu(&km->list);
spin_unlock_bh(&xfrm_km_lock);
synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
int err = 0;
if (WARN_ON(afinfo->family >= NPROTO))
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_state_afinfo_lock);
if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
err = -EEXIST;
else
rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
spin_unlock_bh(&xfrm_state_afinfo_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
int err = 0, family = afinfo->family;
if (WARN_ON(family >= NPROTO))
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_state_afinfo_lock);
if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
err = -EINVAL;
else
RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
}
spin_unlock_bh(&xfrm_state_afinfo_lock);
synchronize_rcu();
return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
{
if (unlikely(family >= NPROTO))
return NULL;
return rcu_dereference(xfrm_state_afinfo[family]);
}
EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
struct xfrm_state_afinfo *afinfo;
if (unlikely(family >= NPROTO))
return NULL;
rcu_read_lock();
afinfo = rcu_dereference(xfrm_state_afinfo[family]);
if (unlikely(!afinfo))
rcu_read_unlock();
return afinfo;
}
void xfrm_flush_gc(void)
{
flush_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(xfrm_flush_gc);
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
if (x->tunnel) {
struct xfrm_state *t = x->tunnel;
if (atomic_read(&t->tunnel_users) == 2)
xfrm_state_delete(t);
atomic_dec(&t->tunnel_users);
xfrm_state_put_sync(t);
x->tunnel = NULL;
}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
const struct xfrm_type *type = READ_ONCE(x->type);
struct crypto_aead *aead;
u32 blksize, net_adj = 0;
if (x->km.state != XFRM_STATE_VALID ||
!type || type->proto != IPPROTO_ESP)
return mtu - x->props.header_len;
aead = x->data;
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
switch (x->props.mode) {
case XFRM_MODE_TRANSPORT:
case XFRM_MODE_BEET:
if (x->props.family == AF_INET)
net_adj = sizeof(struct iphdr);
else if (x->props.family == AF_INET6)
net_adj = sizeof(struct ipv6hdr);
break;
case XFRM_MODE_TUNNEL:
break;
default:
WARN_ON_ONCE(1);
break;
}
return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
net_adj) & ~(blksize - 1)) + net_adj - 2;
}
EXPORT_SYMBOL_GPL(xfrm_state_mtu);
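/*
 * Illustrative example for xfrm_state_mtu() above (all values assumed, not
 * taken from a real SA): with mtu = 1500, props.header_len = 24,
 * crypto_aead_authsize() = 16, AF_INET transport mode (net_adj = 20) and a
 * block size that aligns to 4, the payload budget is
 * ((1500 - 24 - 16 - 20) & ~3) + 20 - 2 = 1458; the trailing "- 2" accounts
 * for the ESP trailer's pad-length and next-header bytes.
 */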
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
struct netlink_ext_ack *extack)
{
const struct xfrm_mode *inner_mode;
const struct xfrm_mode *outer_mode;
int family = x->props.family;
int err;
if (family == AF_INET &&
READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
x->props.flags |= XFRM_STATE_NOPMTUDISC;
err = -EPROTONOSUPPORT;
if (x->sel.family != AF_UNSPEC) {
inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
if (inner_mode == NULL) {
NL_SET_ERR_MSG(extack, "Requested mode not found");
goto error;
}
if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
family != x->sel.family) {
NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family");
goto error;
}
x->inner_mode = *inner_mode;
} else {
const struct xfrm_mode *inner_mode_iaf;
int iafamily = AF_INET;
inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
if (inner_mode == NULL) {
NL_SET_ERR_MSG(extack, "Requested mode not found");
goto error;
}
x->inner_mode = *inner_mode;
if (x->props.family == AF_INET)
iafamily = AF_INET6;
inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
if (inner_mode_iaf) {
if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
x->inner_mode_iaf = *inner_mode_iaf;
}
}
x->type = xfrm_get_type(x->id.proto, family);
if (x->type == NULL) {
NL_SET_ERR_MSG(extack, "Requested type not found");
goto error;
}
x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
err = x->type->init_state(x, extack);
if (err)
goto error;
outer_mode = xfrm_get_mode(x->props.mode, family);
if (!outer_mode) {
NL_SET_ERR_MSG(extack, "Requested mode not found");
err = -EPROTONOSUPPORT;
goto error;
}
x->outer_mode = *outer_mode;
if (init_replay) {
err = xfrm_init_replay(x, extack);
if (err)
goto error;
}
error:
return err;
}
EXPORT_SYMBOL(__xfrm_init_state);
int xfrm_init_state(struct xfrm_state *x)
{
int err;
err = __xfrm_init_state(x, true, false, NULL);
if (!err)
x->km.state = XFRM_STATE_VALID;
return err;
}
EXPORT_SYMBOL(xfrm_init_state);
int __net_init xfrm_state_init(struct net *net)
{
unsigned int sz;
if (net_eq(net, &init_net))
xfrm_state_cache = KMEM_CACHE(xfrm_state,
SLAB_HWCACHE_ALIGN | SLAB_PANIC);
INIT_LIST_HEAD(&net->xfrm.state_all);
sz = sizeof(struct hlist_head) * 8;
net->xfrm.state_bydst = xfrm_hash_alloc(sz);
if (!net->xfrm.state_bydst)
goto out_bydst;
net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
if (!net->xfrm.state_bysrc)
goto out_bysrc;
net->xfrm.state_byspi = xfrm_hash_alloc(sz);
if (!net->xfrm.state_byspi)
goto out_byspi;
net->xfrm.state_byseq = xfrm_hash_alloc(sz);
if (!net->xfrm.state_byseq)
goto out_byseq;
net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
net->xfrm.state_num = 0;
INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
spin_lock_init(&net->xfrm.xfrm_state_lock);
seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
&net->xfrm.xfrm_state_lock);
return 0;
out_byseq:
xfrm_hash_free(net->xfrm.state_byspi, sz);
out_byspi:
xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
return -ENOMEM;
}
void xfrm_state_fini(struct net *net)
{
unsigned int sz;
flush_work(&net->xfrm.state_hash_work);
flush_work(&xfrm_state_gc_work);
xfrm_state_flush(net, 0, false, true);
WARN_ON(!list_empty(&net->xfrm.state_all));
sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
WARN_ON(!hlist_empty(net->xfrm.state_byseq));
xfrm_hash_free(net->xfrm.state_byseq, sz);
WARN_ON(!hlist_empty(net->xfrm.state_byspi));
xfrm_hash_free(net->xfrm.state_byspi, sz);
WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
xfrm_hash_free(net->xfrm.state_bysrc, sz);
WARN_ON(!hlist_empty(net->xfrm.state_bydst));
xfrm_hash_free(net->xfrm.state_bydst, sz);
}
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
struct audit_buffer *audit_buf)
{
struct xfrm_sec_ctx *ctx = x->security;
u32 spi = ntohl(x->id.spi);
if (ctx)
audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
switch (x->props.family) {
case AF_INET:
audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
&x->props.saddr.a4, &x->id.daddr.a4);
break;
case AF_INET6:
audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
x->props.saddr.a6, x->id.daddr.a6);
break;
}
audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}
static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
struct audit_buffer *audit_buf)
{
const struct iphdr *iph4;
const struct ipv6hdr *iph6;
switch (family) {
case AF_INET:
iph4 = ip_hdr(skb);
audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
&iph4->saddr, &iph4->daddr);
break;
case AF_INET6:
iph6 = ipv6_hdr(skb);
audit_log_format(audit_buf,
" src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
&iph6->saddr, &iph6->daddr,
iph6->flow_lbl[0] & 0x0f,
iph6->flow_lbl[1],
iph6->flow_lbl[2]);
break;
}
}
void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SAD-add");
if (audit_buf == NULL)
return;
xfrm_audit_helper_usrinfo(task_valid, audit_buf);
xfrm_audit_helper_sainfo(x, audit_buf);
audit_log_format(audit_buf, " res=%u", result);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SAD-delete");
if (audit_buf == NULL)
return;
xfrm_audit_helper_usrinfo(task_valid, audit_buf);
xfrm_audit_helper_sainfo(x, audit_buf);
audit_log_format(audit_buf, " res=%u", result);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
struct sk_buff *skb)
{
struct audit_buffer *audit_buf;
u32 spi;
audit_buf = xfrm_audit_start("SA-replay-overflow");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
/* don't record the sequence number because it's inherent in this kind
* of audit message */
spi = ntohl(x->id.spi);
audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
void xfrm_audit_state_replay(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
struct audit_buffer *audit_buf;
u32 spi;
audit_buf = xfrm_audit_start("SA-replayed-pkt");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
spi = ntohl(x->id.spi);
audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
spi, spi, ntohl(net_seq));
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SA-notfound");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, family, audit_buf);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
__be32 net_spi, __be32 net_seq)
{
struct audit_buffer *audit_buf;
u32 spi;
audit_buf = xfrm_audit_start("SA-notfound");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, family, audit_buf);
spi = ntohl(net_spi);
audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
spi, spi, ntohl(net_seq));
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
void xfrm_audit_state_icvfail(struct xfrm_state *x,
struct sk_buff *skb, u8 proto)
{
struct audit_buffer *audit_buf;
__be32 net_spi;
__be32 net_seq;
audit_buf = xfrm_audit_start("SA-icv-failure");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
u32 spi = ntohl(net_spi);
audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
spi, spi, ntohl(net_seq));
}
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */
| linux-master | net/xfrm/xfrm_state.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xfrm_policy.c
*
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <[email protected]>
* IPv6 support
* Kazunori MIYAZAWA @USAGI
* YOSHIFUJI Hideaki
* Split up af-specific portion
* Derek Atkins <[email protected]> Add the post_input processor
*
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif
#include "xfrm_hash.h"
#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN 100
struct xfrm_flo {
struct dst_entry *dst_orig;
u8 flags;
};
/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4 16
#define INEXACT_PREFIXLEN_IPV6 48
struct xfrm_pol_inexact_node {
struct rb_node node;
union {
xfrm_address_t addr;
struct rcu_head rcu;
};
u8 prefixlen;
struct rb_root root;
	/* the policies matching this node; the list can be empty */
struct hlist_head hhead;
};
/* xfrm inexact policy search tree:
* xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
* |
* +---- root_d: sorted by daddr:prefix
* | |
* | xfrm_pol_inexact_node
* | |
* | +- root: sorted by saddr/prefix
* | | |
* | | xfrm_pol_inexact_node
* | | |
* | | + root: unused
* | | |
* | | + hhead: saddr:daddr policies
* | |
* | +- coarse policies and all any:daddr policies
* |
* +---- root_s: sorted by saddr:prefix
* | |
* | xfrm_pol_inexact_node
* | |
* | + root: unused
* | |
* | + hhead: saddr:any policies
* |
* +---- coarse policies and all any:any policies
*
* Lookups return four candidate lists:
* 1. any:any list from top-level xfrm_pol_inexact_bin
* 2. any:daddr list from daddr tree
* 3. saddr:daddr list from 2nd level daddr tree
* 4. saddr:any list from saddr tree
*
* This result set then needs to be searched for the policy with
 * the lowest priority. If two results have the same priority, the
 * youngest one wins.
*/
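/*
 * Illustrative placement example for the tree above (addresses assumed):
 * an IPv4 policy with selector saddr 10.0.1.0/24 and daddr 0.0.0.0/0 has a
 * wildcard daddr, so xfrm_policy_inexact_alloc_chain() files it under
 * root_s in the node for 10.0.1.0/24 (the "saddr:any" list); an IPv4 policy
 * with both prefixes shorter than INEXACT_PREFIXLEN_IPV4 lands on the bin's
 * top-level hhead instead.
 */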
struct xfrm_pol_inexact_key {
possible_net_t net;
u32 if_id;
u16 family;
u8 dir, type;
};
struct xfrm_pol_inexact_bin {
struct xfrm_pol_inexact_key k;
struct rhash_head head;
/* list containing '*:*' policies */
struct hlist_head hhead;
seqcount_spinlock_t count;
/* tree sorted by daddr/prefix */
struct rb_root root_d;
/* tree sorted by saddr/prefix */
struct rb_root root_s;
/* slow path below */
struct list_head inexact_bins;
struct rcu_head rcu;
};
enum xfrm_pol_inexact_candidate_type {
XFRM_POL_CAND_BOTH,
XFRM_POL_CAND_SADDR,
XFRM_POL_CAND_DADDR,
XFRM_POL_CAND_ANY,
XFRM_POL_CAND_MAX,
};
struct xfrm_pol_inexact_candidates {
struct hlist_head *res[XFRM_POL_CAND_MAX];
};
static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
__read_mostly;
static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
u32 if_id);
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
struct xfrm_policy *policy);
static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
struct xfrm_pol_inexact_bin *b,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr);
static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
return refcount_inc_not_zero(&policy->refcnt);
}
static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi4 *fl4 = &fl->u.ip4;
return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
(fl4->flowi4_proto == sel->proto || !sel->proto) &&
(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi6 *fl6 = &fl->u.ip6;
return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
(fl6->flowi6_proto == sel->proto || !sel->proto) &&
(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
unsigned short family)
{
switch (family) {
case AF_INET:
return __xfrm4_selector_match(sel, fl);
case AF_INET6:
return __xfrm6_selector_match(sel, fl);
}
return false;
}
static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
const struct xfrm_policy_afinfo *afinfo;
if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
return NULL;
rcu_read_lock();
afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
if (unlikely(!afinfo))
rcu_read_unlock();
return afinfo;
}
/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
return rcu_dereference(xfrm_if_cb);
}
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
int family, u32 mark)
{
const struct xfrm_policy_afinfo *afinfo;
struct dst_entry *dst;
afinfo = xfrm_policy_get_afinfo(family);
if (unlikely(afinfo == NULL))
return ERR_PTR(-EAFNOSUPPORT);
dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
rcu_read_unlock();
return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
int tos, int oif,
xfrm_address_t *prev_saddr,
xfrm_address_t *prev_daddr,
int family, u32 mark)
{
struct net *net = xs_net(x);
xfrm_address_t *saddr = &x->props.saddr;
xfrm_address_t *daddr = &x->id.daddr;
struct dst_entry *dst;
if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
saddr = x->coaddr;
daddr = prev_daddr;
}
if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
saddr = prev_saddr;
daddr = x->coaddr;
}
dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
if (!IS_ERR(dst)) {
if (prev_saddr != saddr)
memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
if (prev_daddr != daddr)
memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
}
return dst;
}
static inline unsigned long make_jiffies(long secs)
{
if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
return MAX_SCHEDULE_TIMEOUT-1;
else
return secs*HZ;
}
static void xfrm_policy_timer(struct timer_list *t)
{
struct xfrm_policy *xp = from_timer(xp, t, timer);
time64_t now = ktime_get_real_seconds();
time64_t next = TIME64_MAX;
int warn = 0;
int dir;
read_lock(&xp->lock);
if (unlikely(xp->walk.dead))
goto out;
dir = xfrm_policy_id2dir(xp->index);
if (xp->lft.hard_add_expires_seconds) {
time64_t tmo = xp->lft.hard_add_expires_seconds +
xp->curlft.add_time - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
next = tmo;
}
if (xp->lft.hard_use_expires_seconds) {
time64_t tmo = xp->lft.hard_use_expires_seconds +
(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
next = tmo;
}
if (xp->lft.soft_add_expires_seconds) {
time64_t tmo = xp->lft.soft_add_expires_seconds +
xp->curlft.add_time - now;
if (tmo <= 0) {
warn = 1;
tmo = XFRM_KM_TIMEOUT;
}
if (tmo < next)
next = tmo;
}
if (xp->lft.soft_use_expires_seconds) {
time64_t tmo = xp->lft.soft_use_expires_seconds +
(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
if (tmo <= 0) {
warn = 1;
tmo = XFRM_KM_TIMEOUT;
}
if (tmo < next)
next = tmo;
}
if (warn)
km_policy_expired(xp, dir, 0, 0);
if (next != TIME64_MAX &&
!mod_timer(&xp->timer, jiffies + make_jiffies(next)))
xfrm_pol_hold(xp);
out:
read_unlock(&xp->lock);
xfrm_pol_put(xp);
return;
expired:
read_unlock(&xp->lock);
if (!xfrm_policy_delete(xp, dir))
km_policy_expired(xp, dir, 1, 0);
xfrm_pol_put(xp);
}
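/*
 * Note on xfrm_policy_timer() above: soft lifetime expiry only warns the key
 * managers (km_policy_expired() with hard == 0) and re-arms the timer no
 * later than XFRM_KM_TIMEOUT, while hard expiry deletes the policy and
 * signals hard == 1, much like the soft/hard split in
 * xfrm_state_check_expire().
 */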
/* Allocate an xfrm_policy. Not used directly here; it is meant to be used by
 * pfkeyv2 SPD calls.
*/
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
struct xfrm_policy *policy;
policy = kzalloc(sizeof(struct xfrm_policy), gfp);
if (policy) {
write_pnet(&policy->xp_net, net);
INIT_LIST_HEAD(&policy->walk.all);
INIT_HLIST_NODE(&policy->bydst_inexact_list);
INIT_HLIST_NODE(&policy->bydst);
INIT_HLIST_NODE(&policy->byidx);
rwlock_init(&policy->lock);
refcount_set(&policy->refcnt, 1);
skb_queue_head_init(&policy->polq.hold_queue);
timer_setup(&policy->timer, xfrm_policy_timer, 0);
timer_setup(&policy->polq.hold_timer,
xfrm_policy_queue_process, 0);
}
return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
security_xfrm_policy_free(policy->security);
kfree(policy);
}
/* Destroy an xfrm_policy: all descendant resources must have been released by this point. */
void xfrm_policy_destroy(struct xfrm_policy *policy)
{
BUG_ON(!policy->walk.dead);
if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
BUG();
xfrm_dev_policy_free(policy);
call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
/* Rule must be locked. Release descendant resources, announce the
 * entry dead. The rule must already be unlinked from all lists at this point.
*/
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
write_lock_bh(&policy->lock);
policy->walk.dead = 1;
write_unlock_bh(&policy->lock);
atomic_inc(&policy->genid);
if (del_timer(&policy->polq.hold_timer))
xfrm_pol_put(policy);
skb_queue_purge(&policy->polq.hold_queue);
if (del_timer(&policy->timer))
xfrm_pol_put(policy);
xfrm_pol_put(policy);
}
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
static inline unsigned int idx_hash(struct net *net, u32 index)
{
return __idx_hash(index, net->xfrm.policy_idx_hmask);
}
/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
unsigned short family, int dir,
u8 *dbits, u8 *sbits)
{
switch (family) {
case AF_INET:
*dbits = net->xfrm.policy_bydst[dir].dbits4;
*sbits = net->xfrm.policy_bydst[dir].sbits4;
break;
case AF_INET6:
*dbits = net->xfrm.policy_bydst[dir].dbits6;
*sbits = net->xfrm.policy_bydst[dir].sbits6;
break;
default:
*dbits = 0;
*sbits = 0;
}
}
static struct hlist_head *policy_hash_bysel(struct net *net,
const struct xfrm_selector *sel,
unsigned short family, int dir)
{
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int hash;
u8 dbits;
u8 sbits;
__get_hash_thresh(net, family, dir, &dbits, &sbits);
hash = __sel_hash(sel, family, hmask, dbits, sbits);
if (hash == hmask + 1)
return NULL;
return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static struct hlist_head *policy_hash_direct(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
unsigned short family, int dir)
{
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int hash;
u8 dbits;
u8 sbits;
__get_hash_thresh(net, family, dir, &dbits, &sbits);
hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static void xfrm_dst_hash_transfer(struct net *net,
struct hlist_head *list,
struct hlist_head *ndsttable,
unsigned int nhashmask,
int dir)
{
struct hlist_node *tmp, *entry0 = NULL;
struct xfrm_policy *pol;
unsigned int h0 = 0;
u8 dbits;
u8 sbits;
redo:
hlist_for_each_entry_safe(pol, tmp, list, bydst) {
unsigned int h;
__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
pol->family, nhashmask, dbits, sbits);
if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
hlist_del_rcu(&pol->bydst);
hlist_add_head_rcu(&pol->bydst, ndsttable + h);
h0 = h;
} else {
if (h != h0)
continue;
hlist_del_rcu(&pol->bydst);
hlist_add_behind_rcu(&pol->bydst, entry0);
}
entry0 = &pol->bydst;
}
if (!hlist_empty(list)) {
entry0 = NULL;
goto redo;
}
}
static void xfrm_idx_hash_transfer(struct hlist_head *list,
struct hlist_head *nidxtable,
unsigned int nhashmask)
{
struct hlist_node *tmp;
struct xfrm_policy *pol;
hlist_for_each_entry_safe(pol, tmp, list, byidx) {
unsigned int h;
h = __idx_hash(pol->index, nhashmask);
hlist_add_head(&pol->byidx, nidxtable+h);
}
}
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
return ((old_hmask + 1) << 1) - 1;
}
static void xfrm_bydst_resize(struct net *net, int dir)
{
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int nhashmask = xfrm_new_hash_mask(hmask);
unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
struct hlist_head *ndst = xfrm_hash_alloc(nsize);
struct hlist_head *odst;
int i;
if (!ndst)
return;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
lockdep_is_held(&net->xfrm.xfrm_policy_lock));
for (i = hmask; i >= 0; i--)
xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
net->xfrm.policy_bydst[dir].hmask = nhashmask;
write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
synchronize_rcu();
xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
static void xfrm_byidx_resize(struct net *net)
{
unsigned int hmask = net->xfrm.policy_idx_hmask;
unsigned int nhashmask = xfrm_new_hash_mask(hmask);
unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
struct hlist_head *oidx = net->xfrm.policy_byidx;
struct hlist_head *nidx = xfrm_hash_alloc(nsize);
int i;
if (!nidx)
return;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
net->xfrm.policy_byidx = nidx;
net->xfrm.policy_idx_hmask = nhashmask;
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
unsigned int cnt = net->xfrm.policy_count[dir];
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
if (total)
*total += cnt;
if ((hmask + 1) < xfrm_policy_hashmax &&
cnt > hmask)
return 1;
return 0;
}
static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
unsigned int hmask = net->xfrm.policy_idx_hmask;
if ((hmask + 1) < xfrm_policy_hashmax &&
total > hmask)
return 1;
return 0;
}
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
si->spdhcnt = net->xfrm.policy_idx_hmask;
si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
int dir, total;
mutex_lock(&hash_resize_mutex);
total = 0;
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
if (xfrm_bydst_should_resize(net, dir, &total))
xfrm_bydst_resize(net, dir);
}
if (xfrm_byidx_should_resize(net, total))
xfrm_byidx_resize(net);
mutex_unlock(&hash_resize_mutex);
}
/* Make sure *pol can be inserted into fastbin.
* Useful to check that later insert requests will be successful
* (provided xfrm_policy_lock is held throughout).
*/
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
struct xfrm_pol_inexact_bin *bin, *prev;
struct xfrm_pol_inexact_key k = {
.family = pol->family,
.type = pol->type,
.dir = dir,
.if_id = pol->if_id,
};
struct net *net = xp_net(pol);
lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
write_pnet(&k.net, net);
bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
xfrm_pol_inexact_params);
if (bin)
return bin;
bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
if (!bin)
return NULL;
bin->k = k;
INIT_HLIST_HEAD(&bin->hhead);
bin->root_d = RB_ROOT;
bin->root_s = RB_ROOT;
seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
&bin->k, &bin->head,
xfrm_pol_inexact_params);
if (!prev) {
list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
return bin;
}
kfree(bin);
return IS_ERR(prev) ? NULL : prev;
}
static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
int family, u8 prefixlen)
{
if (xfrm_addr_any(addr, family))
return true;
if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
return true;
if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
return true;
return false;
}
static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
const xfrm_address_t *addr;
bool saddr_any, daddr_any;
u8 prefixlen;
addr = &policy->selector.saddr;
prefixlen = policy->selector.prefixlen_s;
saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
policy->family,
prefixlen);
addr = &policy->selector.daddr;
prefixlen = policy->selector.prefixlen_d;
daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
policy->family,
prefixlen);
return saddr_any && daddr_any;
}
static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
const xfrm_address_t *addr, u8 prefixlen)
{
node->addr = *addr;
node->prefixlen = prefixlen;
}
static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
struct xfrm_pol_inexact_node *node;
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (node)
xfrm_pol_inexact_node_init(node, addr, prefixlen);
return node;
}
static int xfrm_policy_addr_delta(const xfrm_address_t *a,
const xfrm_address_t *b,
u8 prefixlen, u16 family)
{
u32 ma, mb, mask;
unsigned int pdw, pbi;
int delta = 0;
switch (family) {
case AF_INET:
if (prefixlen == 0)
return 0;
mask = ~0U << (32 - prefixlen);
ma = ntohl(a->a4) & mask;
mb = ntohl(b->a4) & mask;
if (ma < mb)
delta = -1;
else if (ma > mb)
delta = 1;
break;
case AF_INET6:
pdw = prefixlen >> 5;
pbi = prefixlen & 0x1f;
if (pdw) {
delta = memcmp(a->a6, b->a6, pdw << 2);
if (delta)
return delta;
}
if (pbi) {
mask = ~0U << (32 - pbi);
ma = ntohl(a->a6[pdw]) & mask;
mb = ntohl(b->a6[pdw]) & mask;
if (ma < mb)
delta = -1;
else if (ma > mb)
delta = 1;
}
break;
default:
break;
}
return delta;
}
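/*
 * Illustrative comparison for xfrm_policy_addr_delta() above (addresses
 * assumed): with family == AF_INET and prefixlen == 16, 10.1.2.3 and
 * 10.1.200.200 both mask down to 10.1.0.0 and compare equal (delta == 0),
 * whereas 10.2.0.1 masks to 10.2.0.0 and compares greater, so it sorts to
 * the right-hand subtree when used as a key in the inexact rbtrees.
 */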
static void xfrm_policy_inexact_list_reinsert(struct net *net,
struct xfrm_pol_inexact_node *n,
u16 family)
{
unsigned int matched_s, matched_d;
struct xfrm_policy *policy, *p;
matched_s = 0;
matched_d = 0;
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
struct hlist_node *newpos = NULL;
bool matches_s, matches_d;
if (!policy->bydst_reinsert)
continue;
WARN_ON_ONCE(policy->family != family);
policy->bydst_reinsert = false;
hlist_for_each_entry(p, &n->hhead, bydst) {
if (policy->priority > p->priority)
newpos = &p->bydst;
else if (policy->priority == p->priority &&
policy->pos > p->pos)
newpos = &p->bydst;
else
break;
}
if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
hlist_add_behind_rcu(&policy->bydst, newpos);
else
hlist_add_head_rcu(&policy->bydst, &n->hhead);
/* paranoia checks follow.
* Check that the reinserted policy matches at least
* saddr or daddr for current node prefix.
*
* Matching both is fine, matching saddr in one policy
* (but not daddr) and then matching only daddr in another
* is a bug.
*/
matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
&n->addr,
n->prefixlen,
family) == 0;
matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
&n->addr,
n->prefixlen,
family) == 0;
if (matches_s && matches_d)
continue;
WARN_ON_ONCE(!matches_s && !matches_d);
if (matches_s)
matched_s++;
if (matches_d)
matched_d++;
WARN_ON_ONCE(matched_s && matched_d);
}
}
static void xfrm_policy_inexact_node_reinsert(struct net *net,
struct xfrm_pol_inexact_node *n,
struct rb_root *new,
u16 family)
{
struct xfrm_pol_inexact_node *node;
struct rb_node **p, *parent;
/* we should not have another subtree here */
WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
parent = NULL;
p = &new->rb_node;
while (*p) {
u8 prefixlen;
int delta;
parent = *p;
node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
prefixlen = min(node->prefixlen, n->prefixlen);
delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
prefixlen, family);
if (delta < 0) {
p = &parent->rb_left;
} else if (delta > 0) {
p = &parent->rb_right;
} else {
bool same_prefixlen = node->prefixlen == n->prefixlen;
struct xfrm_policy *tmp;
hlist_for_each_entry(tmp, &n->hhead, bydst) {
tmp->bydst_reinsert = true;
hlist_del_rcu(&tmp->bydst);
}
node->prefixlen = prefixlen;
xfrm_policy_inexact_list_reinsert(net, node, family);
if (same_prefixlen) {
kfree_rcu(n, rcu);
return;
}
rb_erase(*p, new);
kfree_rcu(n, rcu);
n = node;
goto restart;
}
}
rb_link_node_rcu(&n->node, parent, p);
rb_insert_color(&n->node, new);
}
/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
struct xfrm_pol_inexact_node *v,
struct xfrm_pol_inexact_node *n,
u16 family)
{
struct xfrm_pol_inexact_node *node;
struct xfrm_policy *tmp;
struct rb_node *rnode;
/* To-be-merged node v has a subtree.
*
* Dismantle it and insert its nodes to n->root.
*/
while ((rnode = rb_first(&v->root)) != NULL) {
node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
rb_erase(&node->node, &v->root);
xfrm_policy_inexact_node_reinsert(net, node, &n->root,
family);
}
hlist_for_each_entry(tmp, &v->hhead, bydst) {
tmp->bydst_reinsert = true;
hlist_del_rcu(&tmp->bydst);
}
xfrm_policy_inexact_list_reinsert(net, n, family);
}
static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
struct rb_root *root,
xfrm_address_t *addr,
u16 family, u8 prefixlen, u8 dir)
{
struct xfrm_pol_inexact_node *cached = NULL;
struct rb_node **p, *parent = NULL;
struct xfrm_pol_inexact_node *node;
p = &root->rb_node;
while (*p) {
int delta;
parent = *p;
node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
delta = xfrm_policy_addr_delta(addr, &node->addr,
node->prefixlen,
family);
if (delta == 0 && prefixlen >= node->prefixlen) {
WARN_ON_ONCE(cached); /* ipsec policies got lost */
return node;
}
if (delta < 0)
p = &parent->rb_left;
else
p = &parent->rb_right;
if (prefixlen < node->prefixlen) {
delta = xfrm_policy_addr_delta(addr, &node->addr,
prefixlen,
family);
if (delta)
continue;
/* This node is a subnet of the new prefix. It needs
* to be removed and re-inserted with the smaller
* prefix and all nodes that are now also covered
* by the reduced prefixlen.
*/
rb_erase(&node->node, root);
if (!cached) {
xfrm_pol_inexact_node_init(node, addr,
prefixlen);
cached = node;
} else {
/* This node also falls within the new
* prefixlen. Merge the to-be-reinserted
* node and this one.
*/
xfrm_policy_inexact_node_merge(net, node,
cached, family);
kfree_rcu(node, rcu);
}
/* restart */
p = &root->rb_node;
parent = NULL;
}
}
node = cached;
if (!node) {
node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
if (!node)
return NULL;
}
rb_link_node_rcu(&node->node, parent, p);
rb_insert_color(&node->node, root);
return node;
}
static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
struct xfrm_pol_inexact_node *node;
struct rb_node *rn = rb_first(r);
while (rn) {
node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
xfrm_policy_inexact_gc_tree(&node->root, rm);
rn = rb_next(rn);
if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
WARN_ON_ONCE(rm);
continue;
}
rb_erase(&node->node, r);
kfree_rcu(node, rcu);
}
}
static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
write_seqcount_begin(&b->count);
xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
write_seqcount_end(&b->count);
if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
!hlist_empty(&b->hhead)) {
WARN_ON_ONCE(net_exit);
return;
}
if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
xfrm_pol_inexact_params) == 0) {
list_del(&b->inexact_bins);
kfree_rcu(b, rcu);
}
}
static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
struct net *net = read_pnet(&b->k.net);
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
__xfrm_policy_inexact_prune_bin(b, false);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
static void __xfrm_policy_inexact_flush(struct net *net)
{
struct xfrm_pol_inexact_bin *bin, *t;
lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
__xfrm_policy_inexact_prune_bin(bin, false);
}
static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
struct xfrm_policy *policy, u8 dir)
{
struct xfrm_pol_inexact_node *n;
struct net *net;
net = xp_net(policy);
lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
if (xfrm_policy_inexact_insert_use_any_list(policy))
return &bin->hhead;
if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
policy->family,
policy->selector.prefixlen_d)) {
write_seqcount_begin(&bin->count);
n = xfrm_policy_inexact_insert_node(net,
&bin->root_s,
&policy->selector.saddr,
policy->family,
policy->selector.prefixlen_s,
dir);
write_seqcount_end(&bin->count);
if (!n)
return NULL;
return &n->hhead;
}
/* daddr is fixed */
write_seqcount_begin(&bin->count);
n = xfrm_policy_inexact_insert_node(net,
&bin->root_d,
&policy->selector.daddr,
policy->family,
policy->selector.prefixlen_d, dir);
write_seqcount_end(&bin->count);
if (!n)
return NULL;
/* saddr is wildcard */
if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
policy->family,
policy->selector.prefixlen_s))
return &n->hhead;
write_seqcount_begin(&bin->count);
n = xfrm_policy_inexact_insert_node(net,
&n->root,
&policy->selector.saddr,
policy->family,
policy->selector.prefixlen_s, dir);
write_seqcount_end(&bin->count);
if (!n)
return NULL;
return &n->hhead;
}
static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
struct xfrm_pol_inexact_bin *bin;
struct xfrm_policy *delpol;
struct hlist_head *chain;
struct net *net;
bin = xfrm_policy_inexact_alloc_bin(policy, dir);
if (!bin)
return ERR_PTR(-ENOMEM);
net = xp_net(policy);
lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
if (!chain) {
__xfrm_policy_inexact_prune_bin(bin, false);
return ERR_PTR(-ENOMEM);
}
delpol = xfrm_policy_insert_list(chain, policy, excl);
if (delpol && excl) {
__xfrm_policy_inexact_prune_bin(bin, false);
return ERR_PTR(-EEXIST);
}
chain = &net->xfrm.policy_inexact[dir];
xfrm_policy_insert_inexact_list(chain, policy);
if (delpol)
__xfrm_policy_inexact_prune_bin(bin, false);
return delpol;
}
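/* Worker that rebuilds the bydst hash tables and inexact lists after the
 * prefixlen thresholds in net->xfrm.policy_hthresh have changed.  The
 * inexact bins/chains needed for re-insertion are allocated up front so
 * the destructive rehash cannot fail, then every policy is re-inserted
 * in its original order.
 */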
static void xfrm_hash_rebuild(struct work_struct *work)
{
struct net *net = container_of(work, struct net,
xfrm.policy_hthresh.work);
unsigned int hmask;
struct xfrm_policy *pol;
struct xfrm_policy *policy;
struct hlist_head *chain;
struct hlist_head *odst;
struct hlist_node *newpos;
int i;
int dir;
unsigned seq;
u8 lbits4, rbits4, lbits6, rbits6;
mutex_lock(&hash_resize_mutex);
/* read selector prefixlen thresholds */
do {
seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
lbits4 = net->xfrm.policy_hthresh.lbits4;
rbits4 = net->xfrm.policy_hthresh.rbits4;
lbits6 = net->xfrm.policy_hthresh.lbits6;
rbits6 = net->xfrm.policy_hthresh.rbits6;
} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
	/* Make sure that we can re-insert the inexact policies before
	 * we start with the destructive action.
	 */
list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
struct xfrm_pol_inexact_bin *bin;
u8 dbits, sbits;
dir = xfrm_policy_id2dir(policy->index);
if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
continue;
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
if (policy->family == AF_INET) {
dbits = rbits4;
sbits = lbits4;
} else {
dbits = rbits6;
sbits = lbits6;
}
} else {
if (policy->family == AF_INET) {
dbits = lbits4;
sbits = rbits4;
} else {
dbits = lbits6;
sbits = rbits6;
}
}
if (policy->selector.prefixlen_d < dbits ||
policy->selector.prefixlen_s < sbits)
continue;
bin = xfrm_policy_inexact_alloc_bin(policy, dir);
if (!bin)
goto out_unlock;
if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
goto out_unlock;
}
/* reset the bydst and inexact table in all directions */
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct hlist_node *n;
hlist_for_each_entry_safe(policy, n,
&net->xfrm.policy_inexact[dir],
bydst_inexact_list) {
hlist_del_rcu(&policy->bydst);
hlist_del_init(&policy->bydst_inexact_list);
}
hmask = net->xfrm.policy_bydst[dir].hmask;
odst = net->xfrm.policy_bydst[dir].table;
for (i = hmask; i >= 0; i--) {
hlist_for_each_entry_safe(policy, n, odst + i, bydst)
hlist_del_rcu(&policy->bydst);
}
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
/* dir out => dst = remote, src = local */
net->xfrm.policy_bydst[dir].dbits4 = rbits4;
net->xfrm.policy_bydst[dir].sbits4 = lbits4;
net->xfrm.policy_bydst[dir].dbits6 = rbits6;
net->xfrm.policy_bydst[dir].sbits6 = lbits6;
} else {
/* dir in/fwd => dst = local, src = remote */
net->xfrm.policy_bydst[dir].dbits4 = lbits4;
net->xfrm.policy_bydst[dir].sbits4 = rbits4;
net->xfrm.policy_bydst[dir].dbits6 = lbits6;
net->xfrm.policy_bydst[dir].sbits6 = rbits6;
}
}
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
if (policy->walk.dead)
continue;
dir = xfrm_policy_id2dir(policy->index);
if (dir >= XFRM_POLICY_MAX) {
/* skip socket policies */
continue;
}
newpos = NULL;
chain = policy_hash_bysel(net, &policy->selector,
policy->family, dir);
if (!chain) {
void *p = xfrm_policy_inexact_insert(policy, dir, 0);
WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
continue;
}
hlist_for_each_entry(pol, chain, bydst) {
if (policy->priority >= pol->priority)
newpos = &pol->bydst;
else
break;
}
if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
hlist_add_behind_rcu(&policy->bydst, newpos);
else
hlist_add_head_rcu(&policy->bydst, chain);
}
out_unlock:
__xfrm_policy_inexact_flush(net);
write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
mutex_unlock(&hash_resize_mutex);
}
void xfrm_policy_hash_rebuild(struct net *net)
{
schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
/* Generate a new policy index.  KAME seems to generate them ordered by
 * cost, at the price of an absolutely unpredictable ordering of rules.
 * This will not pass.
 */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
static u32 idx_generator;
for (;;) {
struct hlist_head *list;
struct xfrm_policy *p;
u32 idx;
int found;
if (!index) {
idx = (idx_generator | dir);
idx_generator += 8;
} else {
idx = index;
index = 0;
}
if (idx == 0)
idx = 8;
list = net->xfrm.policy_byidx + idx_hash(net, idx);
found = 0;
hlist_for_each_entry(p, list, byidx) {
if (p->index == idx) {
found = 1;
break;
}
}
if (!found)
return idx;
}
}
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
u32 *p1 = (u32 *) s1;
u32 *p2 = (u32 *) s2;
int len = sizeof(struct xfrm_selector) / sizeof(u32);
int i;
for (i = 0; i < len; i++) {
if (p1[i] != p2[i])
return 1;
}
return 0;
}
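/* Move the packets queued on @old's hold queue over to @new and rearm
 * @new's hold timer so they are re-processed against the replacing policy.
 */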
static void xfrm_policy_requeue(struct xfrm_policy *old,
struct xfrm_policy *new)
{
struct xfrm_policy_queue *pq = &old->polq;
struct sk_buff_head list;
if (skb_queue_empty(&pq->hold_queue))
return;
__skb_queue_head_init(&list);
spin_lock_bh(&pq->hold_queue.lock);
skb_queue_splice_init(&pq->hold_queue, &list);
if (del_timer(&pq->hold_timer))
xfrm_pol_put(old);
spin_unlock_bh(&pq->hold_queue.lock);
pq = &new->polq;
spin_lock_bh(&pq->hold_queue.lock);
skb_queue_splice(&list, &pq->hold_queue);
pq->timeout = XFRM_QUEUE_TMO_MIN;
if (!mod_timer(&pq->hold_timer, jiffies))
xfrm_pol_hold(new);
spin_unlock_bh(&pq->hold_queue.lock);
}
static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
struct xfrm_policy *pol)
{
return mark->v == pol->mark.v && mark->m == pol->mark.m;
}
static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
const struct xfrm_pol_inexact_key *k = data;
u32 a = k->type << 24 | k->dir << 16 | k->family;
return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
seed);
}
static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
const struct xfrm_pol_inexact_bin *b = data;
return xfrm_pol_bin_key(&b->k, 0, seed);
}
static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct xfrm_pol_inexact_key *key = arg->key;
const struct xfrm_pol_inexact_bin *b = ptr;
int ret;
if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
return -1;
ret = b->k.dir ^ key->dir;
if (ret)
return ret;
ret = b->k.type ^ key->type;
if (ret)
return ret;
ret = b->k.family ^ key->family;
if (ret)
return ret;
return b->k.if_id ^ key->if_id;
}
static const struct rhashtable_params xfrm_pol_inexact_params = {
.head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
.hashfn = xfrm_pol_bin_key,
.obj_hashfn = xfrm_pol_bin_obj,
.obj_cmpfn = xfrm_pol_bin_cmp,
.automatic_shrinking = true,
};
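/* Insert @policy into the per-direction inexact list in priority order and
 * renumber ->pos for every entry; lookups use ->pos to prefer the earlier
 * entry when priorities are equal.
 */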
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
struct xfrm_policy *policy)
{
struct xfrm_policy *pol, *delpol = NULL;
struct hlist_node *newpos = NULL;
int i = 0;
hlist_for_each_entry(pol, chain, bydst_inexact_list) {
if (pol->type == policy->type &&
pol->if_id == policy->if_id &&
!selector_cmp(&pol->selector, &policy->selector) &&
xfrm_policy_mark_match(&policy->mark, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
delpol = pol;
if (policy->priority > pol->priority)
continue;
} else if (policy->priority >= pol->priority) {
newpos = &pol->bydst_inexact_list;
continue;
}
if (delpol)
break;
}
if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
else
hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
hlist_for_each_entry(pol, chain, bydst_inexact_list) {
pol->pos = i;
i++;
}
}
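/* Insert @policy into a bydst hash chain in priority order.  Returns the
 * policy it replaces (same type/if_id/selector/mark/ctx), NULL if there is
 * none, or ERR_PTR(-EEXIST) when @excl is set and such a policy exists.
 */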
static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
struct xfrm_policy *policy,
bool excl)
{
struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == policy->type &&
pol->if_id == policy->if_id &&
!selector_cmp(&pol->selector, &policy->selector) &&
xfrm_policy_mark_match(&policy->mark, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl)
return ERR_PTR(-EEXIST);
delpol = pol;
if (policy->priority > pol->priority)
continue;
} else if (policy->priority >= pol->priority) {
newpos = pol;
continue;
}
if (delpol)
break;
}
if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
else
/* Packet offload policies enter to the head
* to speed-up lookups.
*/
hlist_add_head_rcu(&policy->bydst, chain);
return delpol;
}
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
struct net *net = xp_net(policy);
struct xfrm_policy *delpol;
struct hlist_head *chain;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
if (chain)
delpol = xfrm_policy_insert_list(chain, policy, excl);
else
delpol = xfrm_policy_inexact_insert(policy, dir, excl);
if (IS_ERR(delpol)) {
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return PTR_ERR(delpol);
}
__xfrm_policy_link(policy, dir);
/* After previous checking, family can either be AF_INET or AF_INET6 */
if (policy->family == AF_INET)
rt_genid_bump_ipv4(net);
else
rt_genid_bump_ipv6(net);
if (delpol) {
xfrm_policy_requeue(delpol, policy);
__xfrm_policy_unlink(delpol, dir);
}
policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
policy->curlft.add_time = ktime_get_real_seconds();
policy->curlft.use_time = 0;
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (delpol)
xfrm_policy_kill(delpol);
else if (xfrm_bydst_should_resize(net, dir, NULL))
schedule_work(&net->xfrm.policy_hash_work);
return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
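/* Scan @chain for the policy that exactly matches the given mark, if_id,
 * type, selector and security context.
 */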
static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx)
{
struct xfrm_policy *pol;
if (!chain)
return NULL;
hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == type &&
pol->if_id == if_id &&
xfrm_policy_mark_match(mark, pol) &&
!selector_cmp(sel, &pol->selector) &&
xfrm_sec_ctx_match(ctx, pol->security))
return pol;
}
return NULL;
}
struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
u8 type, int dir, struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete, int *err)
{
struct xfrm_pol_inexact_bin *bin = NULL;
struct xfrm_policy *pol, *ret = NULL;
struct hlist_head *chain;
*err = 0;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, sel, sel->family, dir);
if (!chain) {
struct xfrm_pol_inexact_candidates cand;
int i;
bin = xfrm_policy_inexact_lookup(net, type,
sel->family, dir, if_id);
if (!bin) {
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return NULL;
}
if (!xfrm_policy_find_inexact_candidates(&cand, bin,
&sel->saddr,
&sel->daddr)) {
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return NULL;
}
pol = NULL;
for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
struct xfrm_policy *tmp;
tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
if_id, type, dir,
sel, ctx);
if (!tmp)
continue;
if (!pol || tmp->pos < pol->pos)
pol = tmp;
}
} else {
pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
sel, ctx);
}
if (pol) {
xfrm_pol_hold(pol);
if (delete) {
*err = security_xfrm_policy_delete(pol->security);
if (*err) {
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
__xfrm_policy_unlink(pol, dir);
}
ret = pol;
}
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
if (bin && delete)
xfrm_policy_inexact_prune_bin(bin);
return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
u8 type, int dir, u32 id, int delete, int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
*err = -ENOENT;
if (xfrm_policy_id2dir(id) != dir)
return NULL;
*err = 0;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = net->xfrm.policy_byidx + idx_hash(net, id);
ret = NULL;
hlist_for_each_entry(pol, chain, byidx) {
if (pol->type == type && pol->index == id &&
pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
xfrm_pol_hold(pol);
if (delete) {
*err = security_xfrm_policy_delete(
pol->security);
if (*err) {
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
__xfrm_policy_unlink(pol, dir);
}
ret = pol;
break;
}
}
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
struct xfrm_policy *pol;
int err = 0;
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
if (pol->walk.dead ||
xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
pol->type != type)
continue;
err = security_xfrm_policy_delete(pol->security);
if (err) {
xfrm_audit_policy_delete(pol, 0, task_valid);
return err;
}
}
return err;
}
static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
struct net_device *dev,
bool task_valid)
{
struct xfrm_policy *pol;
int err = 0;
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
if (pol->walk.dead ||
xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
pol->xdo.dev != dev)
continue;
err = security_xfrm_policy_delete(pol->security);
if (err) {
xfrm_audit_policy_delete(pol, 0, task_valid);
return err;
}
}
return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
return 0;
}
static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
struct net_device *dev,
bool task_valid)
{
return 0;
}
#endif
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
int dir, err = 0, cnt = 0;
struct xfrm_policy *pol;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
err = xfrm_policy_flush_secctx_check(net, type, task_valid);
if (err)
goto out;
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
dir = xfrm_policy_id2dir(pol->index);
if (pol->walk.dead ||
dir >= XFRM_POLICY_MAX ||
pol->type != type)
continue;
__xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_dev_policy_delete(pol);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again;
}
if (cnt)
__xfrm_policy_inexact_flush(net);
else
err = -ESRCH;
out:
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
bool task_valid)
{
int dir, err = 0, cnt = 0;
struct xfrm_policy *pol;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
if (err)
goto out;
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
dir = xfrm_policy_id2dir(pol->index);
if (pol->walk.dead ||
dir >= XFRM_POLICY_MAX ||
pol->xdo.dev != dev)
continue;
__xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_dev_policy_delete(pol);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again;
}
if (cnt)
__xfrm_policy_inexact_flush(net);
else
err = -ESRCH;
out:
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
}
EXPORT_SYMBOL(xfrm_dev_policy_flush);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
int (*func)(struct xfrm_policy *, int, int, void*),
void *data)
{
struct xfrm_policy *pol;
struct xfrm_policy_walk_entry *x;
int error = 0;
if (walk->type >= XFRM_POLICY_TYPE_MAX &&
walk->type != XFRM_POLICY_TYPE_ANY)
return -EINVAL;
if (list_empty(&walk->walk.all) && walk->seq != 0)
return 0;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
if (list_empty(&walk->walk.all))
x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
else
x = list_first_entry(&walk->walk.all,
struct xfrm_policy_walk_entry, all);
list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
if (x->dead)
continue;
pol = container_of(x, struct xfrm_policy, walk);
if (walk->type != XFRM_POLICY_TYPE_ANY &&
walk->type != pol->type)
continue;
error = func(pol, xfrm_policy_id2dir(pol->index),
walk->seq, data);
if (error) {
list_move_tail(&walk->walk.all, &x->all);
goto out;
}
walk->seq++;
}
if (walk->seq == 0) {
error = -ENOENT;
goto out;
}
list_del_init(&walk->walk.all);
out:
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
INIT_LIST_HEAD(&walk->walk.all);
walk->walk.dead = 1;
walk->type = type;
walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
if (list_empty(&walk->walk.all))
return;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
list_del(&walk->walk.all);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
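/* Typical dump loop over the walk helpers above (sketch only;
 * dump_one_policy is a placeholder callback with the signature taken by
 * xfrm_policy_walk()):
 *
 *	struct xfrm_policy_walk walk;
 *	int err;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one_policy, data);
 *	xfrm_policy_walk_done(&walk, net);
 */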
/*
* Find policy to apply to this flow.
*
* Returns 0 if policy found, else an -errno.
*/
static int xfrm_policy_match(const struct xfrm_policy *pol,
const struct flowi *fl,
u8 type, u16 family, u32 if_id)
{
const struct xfrm_selector *sel = &pol->selector;
int ret = -ESRCH;
bool match;
if (pol->family != family ||
pol->if_id != if_id ||
(fl->flowi_mark & pol->mark.m) != pol->mark.v ||
pol->type != type)
return ret;
match = xfrm_selector_match(sel, fl, family);
if (match)
ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
return ret;
}
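/* Lockless rbtree walk to the node whose prefix covers @addr; retried via
 * the bin's seqcount if the tree was rebuilt concurrently.
 */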
static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
seqcount_spinlock_t *count,
const xfrm_address_t *addr, u16 family)
{
const struct rb_node *parent;
int seq;
again:
seq = read_seqcount_begin(count);
parent = rcu_dereference_raw(r->rb_node);
while (parent) {
struct xfrm_pol_inexact_node *node;
int delta;
node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
delta = xfrm_policy_addr_delta(addr, &node->addr,
node->prefixlen, family);
if (delta < 0) {
parent = rcu_dereference_raw(parent->rb_left);
continue;
} else if (delta > 0) {
parent = rcu_dereference_raw(parent->rb_right);
continue;
}
return node;
}
if (read_seqcount_retry(count, seq))
goto again;
return NULL;
}
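/* Fill @cand with up to four candidate policy lists: the bin-wide "any"
 * list plus the per-daddr, per-saddr and daddr+saddr subtree lists that
 * cover the given addresses.
 */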
static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
struct xfrm_pol_inexact_bin *b,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
struct xfrm_pol_inexact_node *n;
u16 family;
if (!b)
return false;
family = b->k.family;
memset(cand, 0, sizeof(*cand));
cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
family);
if (n) {
cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
family);
if (n)
cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
}
n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
family);
if (n)
cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
return true;
}
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
u8 dir, u32 if_id)
{
struct xfrm_pol_inexact_key k = {
.family = family,
.type = type,
.dir = dir,
.if_id = if_id,
};
write_pnet(&k.net, net);
return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
xfrm_pol_inexact_params);
}
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
u8 dir, u32 if_id)
{
struct xfrm_pol_inexact_bin *bin;
lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
rcu_read_lock();
bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
rcu_read_unlock();
return bin;
}
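/* Walk one candidate chain for a policy matching @fl.  @prefer is the best
 * match found on earlier chains; a chain entry only wins if it has a lower
 * priority value or, at equal priority, a smaller ->pos.
 */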
static struct xfrm_policy *
__xfrm_policy_eval_candidates(struct hlist_head *chain,
struct xfrm_policy *prefer,
const struct flowi *fl,
u8 type, u16 family, u32 if_id)
{
u32 priority = prefer ? prefer->priority : ~0u;
struct xfrm_policy *pol;
if (!chain)
return NULL;
hlist_for_each_entry_rcu(pol, chain, bydst) {
int err;
if (pol->priority > priority)
break;
err = xfrm_policy_match(pol, fl, type, family, if_id);
if (err) {
if (err != -ESRCH)
return ERR_PTR(err);
continue;
}
if (prefer) {
/* matches. Is it older than *prefer? */
if (pol->priority == priority &&
prefer->pos < pol->pos)
return prefer;
}
return pol;
}
return NULL;
}
static struct xfrm_policy *
xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
struct xfrm_policy *prefer,
const struct flowi *fl,
u8 type, u16 family, u32 if_id)
{
struct xfrm_policy *tmp;
int i;
for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
tmp = __xfrm_policy_eval_candidates(cand->res[i],
prefer,
fl, type, family, if_id);
if (!tmp)
continue;
if (IS_ERR(tmp))
return tmp;
prefer = tmp;
}
return prefer;
}
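/* Main policy lookup for @fl: search the exact bydst hash chain first,
 * then the inexact candidate lists, keeping the best match by priority
 * (and ->pos on ties).  Retries if the hash tables are rebuilt underneath.
 */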
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
const struct flowi *fl,
u16 family, u8 dir,
u32 if_id)
{
struct xfrm_pol_inexact_candidates cand;
const xfrm_address_t *daddr, *saddr;
struct xfrm_pol_inexact_bin *bin;
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
unsigned int sequence;
int err;
daddr = xfrm_flowi_daddr(fl, family);
saddr = xfrm_flowi_saddr(fl, family);
if (unlikely(!daddr || !saddr))
return NULL;
rcu_read_lock();
retry:
do {
sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
chain = policy_hash_direct(net, daddr, saddr, family, dir);
} while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
ret = NULL;
hlist_for_each_entry_rcu(pol, chain, bydst) {
err = xfrm_policy_match(pol, fl, type, family, if_id);
if (err) {
if (err == -ESRCH)
continue;
else {
ret = ERR_PTR(err);
goto fail;
}
} else {
ret = pol;
break;
}
}
if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
goto skip_inexact;
bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
daddr))
goto skip_inexact;
pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
family, if_id);
if (pol) {
ret = pol;
if (IS_ERR(pol))
goto fail;
}
skip_inexact:
if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
goto retry;
if (ret && !xfrm_pol_hold_rcu(ret))
goto retry;
fail:
rcu_read_unlock();
return ret;
}
static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
const struct flowi *fl,
u16 family, u8 dir, u32 if_id)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_policy *pol;
pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
dir, if_id);
if (pol != NULL)
return pol;
#endif
return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
dir, if_id);
}
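/* Look up the per-socket policy for @dir and match it against the flow.
 * Returns a held policy, NULL when it does not apply, or an ERR_PTR from
 * the security hook.
 */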
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
const struct flowi *fl,
u16 family, u32 if_id)
{
struct xfrm_policy *pol;
rcu_read_lock();
again:
pol = rcu_dereference(sk->sk_policy[dir]);
if (pol != NULL) {
bool match;
int err = 0;
if (pol->family != family) {
pol = NULL;
goto out;
}
match = xfrm_selector_match(&pol->selector, fl, family);
if (match) {
if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
pol->if_id != if_id) {
pol = NULL;
goto out;
}
err = security_xfrm_policy_lookup(pol->security,
fl->flowi_secid);
if (!err) {
if (!xfrm_pol_hold_rcu(pol))
goto again;
} else if (err == -ESRCH) {
pol = NULL;
} else {
pol = ERR_PTR(err);
}
} else
pol = NULL;
}
out:
rcu_read_unlock();
return pol;
}
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
list_add(&pol->walk.all, &net->xfrm.policy_all);
net->xfrm.policy_count[dir]++;
xfrm_pol_hold(pol);
}
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir)
{
struct net *net = xp_net(pol);
if (list_empty(&pol->walk.all))
return NULL;
/* Socket policies are not hashed. */
if (!hlist_unhashed(&pol->bydst)) {
hlist_del_rcu(&pol->bydst);
hlist_del_init(&pol->bydst_inexact_list);
hlist_del(&pol->byidx);
}
list_del_init(&pol->walk.all);
net->xfrm.policy_count[dir]--;
return pol;
}
static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}
static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (pol) {
xfrm_dev_policy_delete(pol);
xfrm_policy_kill(pol);
return 0;
}
return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
struct net *net = sock_net(sk);
struct xfrm_policy *old_pol;
#ifdef CONFIG_XFRM_SUB_POLICY
if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
return -EINVAL;
#endif
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
old_pol = rcu_dereference_protected(sk->sk_policy[dir],
lockdep_is_held(&net->xfrm.xfrm_policy_lock));
if (pol) {
pol->curlft.add_time = ktime_get_real_seconds();
pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
xfrm_sk_policy_link(pol, dir);
}
rcu_assign_pointer(sk->sk_policy[dir], pol);
if (old_pol) {
if (pol)
xfrm_policy_requeue(old_pol, pol);
		/* Unlinking always succeeds.  This is the only function
		 * allowed to delete or replace a socket policy.
		 */
xfrm_sk_policy_unlink(old_pol, dir);
}
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (old_pol) {
xfrm_policy_kill(old_pol);
}
return 0;
}
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
struct net *net = xp_net(old);
if (newp) {
newp->selector = old->selector;
if (security_xfrm_policy_clone(old->security,
&newp->security)) {
kfree(newp);
return NULL; /* ENOMEM */
}
newp->lft = old->lft;
newp->curlft = old->curlft;
newp->mark = old->mark;
newp->if_id = old->if_id;
newp->action = old->action;
newp->flags = old->flags;
newp->xfrm_nr = old->xfrm_nr;
newp->index = old->index;
newp->type = old->type;
newp->family = old->family;
memcpy(newp->xfrm_vec, old->xfrm_vec,
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_sk_policy_link(newp, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_pol_put(newp);
}
return newp;
}
int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
const struct xfrm_policy *p;
struct xfrm_policy *np;
int i, ret = 0;
rcu_read_lock();
for (i = 0; i < 2; i++) {
p = rcu_dereference(osk->sk_policy[i]);
if (p) {
np = clone_policy(p, i);
if (unlikely(!np)) {
ret = -ENOMEM;
break;
}
rcu_assign_pointer(sk->sk_policy[i], np);
}
}
rcu_read_unlock();
return ret;
}
static int
xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
xfrm_address_t *remote, unsigned short family, u32 mark)
{
int err;
const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EINVAL;
err = afinfo->get_saddr(net, oif, local, remote, mark);
rcu_read_unlock();
return err;
}
/* Resolve list of templates for the flow, given policy. */
static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
struct xfrm_state **xfrm, unsigned short family)
{
struct net *net = xp_net(policy);
int nx;
int i, error;
xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
xfrm_address_t tmp;
for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
struct xfrm_state *x;
xfrm_address_t *remote = daddr;
xfrm_address_t *local = saddr;
struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
if (tmpl->mode == XFRM_MODE_TUNNEL ||
tmpl->mode == XFRM_MODE_BEET) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
if (xfrm_addr_any(local, tmpl->encap_family)) {
error = xfrm_get_saddr(net, fl->flowi_oif,
&tmp, remote,
tmpl->encap_family, 0);
if (error)
goto fail;
local = &tmp;
}
}
x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
family, policy->if_id);
if (x && x->km.state == XFRM_STATE_VALID) {
xfrm[nx++] = x;
daddr = remote;
saddr = local;
continue;
}
if (x) {
error = (x->km.state == XFRM_STATE_ERROR ?
-EINVAL : -EAGAIN);
xfrm_state_put(x);
} else if (error == -ESRCH) {
error = -EAGAIN;
}
if (!tmpl->optional)
goto fail;
}
return nx;
fail:
for (nx--; nx >= 0; nx--)
xfrm_state_put(xfrm[nx]);
return error;
}
static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
struct xfrm_state **xfrm, unsigned short family)
{
struct xfrm_state *tp[XFRM_MAX_DEPTH];
struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
int cnx = 0;
int error;
int ret;
int i;
for (i = 0; i < npols; i++) {
if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
error = -ENOBUFS;
goto fail;
}
ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
if (ret < 0) {
error = ret;
goto fail;
} else
cnx += ret;
}
/* found states are sorted for outbound processing */
if (npols > 1)
xfrm_state_sort(xfrm, tpp, cnx, family);
return cnx;
fail:
for (cnx--; cnx >= 0; cnx--)
xfrm_state_put(tpp[cnx]);
return error;
}
static int xfrm_get_tos(const struct flowi *fl, int family)
{
if (family == AF_INET)
return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
return 0;
}
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
struct dst_ops *dst_ops;
struct xfrm_dst *xdst;
if (!afinfo)
return ERR_PTR(-EINVAL);
switch (family) {
case AF_INET:
dst_ops = &net->xfrm.xfrm4_dst_ops;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
dst_ops = &net->xfrm.xfrm6_dst_ops;
break;
#endif
default:
BUG();
}
xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
if (likely(xdst)) {
memset_after(xdst, 0, u.dst);
} else
xdst = ERR_PTR(-ENOBUFS);
rcu_read_unlock();
return xdst;
}
static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
int nfheader_len)
{
if (dst->ops->family == AF_INET6) {
struct rt6_info *rt = (struct rt6_info *)dst;
path->path_cookie = rt6_get_cookie(rt);
path->u.rt6.rt6i_nfheader_len = nfheader_len;
}
}
static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
const struct xfrm_policy_afinfo *afinfo =
xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
int err;
if (!afinfo)
return -EINVAL;
err = afinfo->fill_dst(xdst, dev, fl);
rcu_read_unlock();
return err;
}
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
* all the metrics... Shortly, bundle a bundle.
*/
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
struct xfrm_state **xfrm,
struct xfrm_dst **bundle,
int nx,
const struct flowi *fl,
struct dst_entry *dst)
{
const struct xfrm_state_afinfo *afinfo;
const struct xfrm_mode *inner_mode;
struct net *net = xp_net(policy);
unsigned long now = jiffies;
struct net_device *dev;
struct xfrm_dst *xdst_prev = NULL;
struct xfrm_dst *xdst0 = NULL;
int i = 0;
int err;
int header_len = 0;
int nfheader_len = 0;
int trailer_len = 0;
int tos;
int family = policy->selector.family;
xfrm_address_t saddr, daddr;
xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
tos = xfrm_get_tos(fl, family);
dst_hold(dst);
for (; i < nx; i++) {
struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
struct dst_entry *dst1 = &xdst->u.dst;
err = PTR_ERR(xdst);
if (IS_ERR(xdst)) {
dst_release(dst);
goto put_states;
}
bundle[i] = xdst;
if (!xdst_prev)
xdst0 = xdst;
else
			/* The ref count is taken during xfrm_alloc_dst();
			 * no need to do dst_clone() on dst1.
			 */
xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
if (xfrm[i]->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(xfrm[i],
xfrm_af2proto(family));
if (!inner_mode) {
err = -EAFNOSUPPORT;
dst_release(dst);
goto put_states;
}
} else
inner_mode = &xfrm[i]->inner_mode;
xdst->route = dst;
dst_copy_metrics(dst1, dst);
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
__u32 mark = 0;
int oif;
if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
family = xfrm[i]->props.family;
oif = fl->flowi_oif ? : fl->flowi_l3mdev;
dst = xfrm_dst_lookup(xfrm[i], tos, oif,
&saddr, &daddr, family, mark);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;
} else
dst_hold(dst);
dst1->xfrm = xfrm[i];
xdst->xfrm_genid = xfrm[i]->genid;
dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
dst1->lastuse = now;
dst1->input = dst_discard;
rcu_read_lock();
afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
if (likely(afinfo))
dst1->output = afinfo->output;
else
dst1->output = dst_discard_out;
rcu_read_unlock();
xdst_prev = xdst;
header_len += xfrm[i]->props.header_len;
if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
nfheader_len += xfrm[i]->props.header_len;
trailer_len += xfrm[i]->props.trailer_len;
}
xfrm_dst_set_child(xdst_prev, dst);
xdst0->path = dst;
err = -ENODEV;
dev = dst->dev;
if (!dev)
goto free_dst;
xfrm_init_path(xdst0, dst, nfheader_len);
xfrm_init_pmtu(bundle, nx);
for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
err = xfrm_fill_dst(xdst_prev, dev, fl);
if (err)
goto free_dst;
xdst_prev->u.dst.header_len = header_len;
xdst_prev->u.dst.trailer_len = trailer_len;
header_len -= xdst_prev->u.dst.xfrm->props.header_len;
trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
}
return &xdst0->u.dst;
put_states:
for (; i < nx; i++)
xfrm_state_put(xfrm[i]);
free_dst:
if (xdst0)
dst_release_immediate(&xdst0->u.dst);
return ERR_PTR(err);
}
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
struct xfrm_policy **pols,
int *num_pols, int *num_xfrms)
{
int i;
if (*num_pols == 0 || !pols[0]) {
*num_pols = 0;
*num_xfrms = 0;
return 0;
}
if (IS_ERR(pols[0])) {
*num_pols = 0;
return PTR_ERR(pols[0]);
}
*num_xfrms = pols[0]->xfrm_nr;
#ifdef CONFIG_XFRM_SUB_POLICY
if (pols[0]->action == XFRM_POLICY_ALLOW &&
pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
XFRM_POLICY_TYPE_MAIN,
fl, family,
XFRM_POLICY_OUT,
pols[0]->if_id);
if (pols[1]) {
if (IS_ERR(pols[1])) {
xfrm_pols_put(pols, *num_pols);
*num_pols = 0;
return PTR_ERR(pols[1]);
}
(*num_pols)++;
(*num_xfrms) += pols[1]->xfrm_nr;
}
}
#endif
for (i = 0; i < *num_pols; i++) {
if (pols[i]->action != XFRM_POLICY_ALLOW) {
*num_xfrms = -1;
break;
}
}
return 0;
}
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
const struct flowi *fl, u16 family,
struct dst_entry *dst_orig)
{
struct net *net = xp_net(pols[0]);
struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
struct xfrm_dst *xdst;
struct dst_entry *dst;
int err;
/* Try to instantiate a bundle */
err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
if (err <= 0) {
if (err == 0)
return NULL;
if (err != -EAGAIN)
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
return ERR_PTR(err);
}
dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
if (IS_ERR(dst)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
return ERR_CAST(dst);
}
xdst = (struct xfrm_dst *)dst;
xdst->num_xfrms = err;
xdst->num_pols = num_pols;
memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
xdst->policy_genid = atomic_read(&pols[0]->genid);
return xdst;
}
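/* Hold-timer callback: try to resolve a real bundle for the packets parked
 * on the policy's hold queue.  If the lookup still yields a queueing dummy
 * bundle, back off (doubling the timeout up to XFRM_QUEUE_TMO_MAX);
 * otherwise re-route and transmit the queued packets.
 */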
static void xfrm_policy_queue_process(struct timer_list *t)
{
struct sk_buff *skb;
struct sock *sk;
struct dst_entry *dst;
struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
struct net *net = xp_net(pol);
struct xfrm_policy_queue *pq = &pol->polq;
struct flowi fl;
struct sk_buff_head list;
__u32 skb_mark;
spin_lock(&pq->hold_queue.lock);
skb = skb_peek(&pq->hold_queue);
if (!skb) {
spin_unlock(&pq->hold_queue.lock);
goto out;
}
dst = skb_dst(skb);
sk = skb->sk;
/* Fixup the mark to support VTI. */
skb_mark = skb->mark;
skb->mark = pol->mark.v;
xfrm_decode_session(skb, &fl, dst->ops->family);
skb->mark = skb_mark;
spin_unlock(&pq->hold_queue.lock);
dst_hold(xfrm_dst_path(dst));
dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
if (IS_ERR(dst))
goto purge_queue;
if (dst->flags & DST_XFRM_QUEUE) {
dst_release(dst);
if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
goto purge_queue;
pq->timeout = pq->timeout << 1;
if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
xfrm_pol_hold(pol);
goto out;
}
dst_release(dst);
__skb_queue_head_init(&list);
spin_lock(&pq->hold_queue.lock);
pq->timeout = 0;
skb_queue_splice_init(&pq->hold_queue, &list);
spin_unlock(&pq->hold_queue.lock);
while (!skb_queue_empty(&list)) {
skb = __skb_dequeue(&list);
/* Fixup the mark to support VTI. */
skb_mark = skb->mark;
skb->mark = pol->mark.v;
xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
skb->mark = skb_mark;
dst_hold(xfrm_dst_path(skb_dst(skb)));
dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
if (IS_ERR(dst)) {
kfree_skb(skb);
continue;
}
nf_reset_ct(skb);
skb_dst_drop(skb);
skb_dst_set(skb, dst);
dst_output(net, skb->sk, skb);
}
out:
xfrm_pol_put(pol);
return;
purge_queue:
pq->timeout = 0;
skb_queue_purge(&pq->hold_queue);
xfrm_pol_put(pol);
}
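/* Output handler for dummy (DST_XFRM_QUEUE) bundles: park the skb on the
 * policy's hold queue and (re)arm the hold timer instead of transmitting.
 */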
static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
unsigned long sched_next;
struct dst_entry *dst = skb_dst(skb);
struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
struct xfrm_policy *pol = xdst->pols[0];
struct xfrm_policy_queue *pq = &pol->polq;
if (unlikely(skb_fclone_busy(sk, skb))) {
kfree_skb(skb);
return 0;
}
if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
kfree_skb(skb);
return -EAGAIN;
}
skb_dst_force(skb);
spin_lock_bh(&pq->hold_queue.lock);
if (!pq->timeout)
pq->timeout = XFRM_QUEUE_TMO_MIN;
sched_next = jiffies + pq->timeout;
if (del_timer(&pq->hold_timer)) {
if (time_before(pq->hold_timer.expires, sched_next))
sched_next = pq->hold_timer.expires;
xfrm_pol_put(pol);
}
__skb_queue_tail(&pq->hold_queue, skb);
if (!mod_timer(&pq->hold_timer, sched_next))
xfrm_pol_hold(pol);
spin_unlock_bh(&pq->hold_queue.lock);
return 0;
}
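/* Build a bundle that carries no xfrm_states.  When queueing is allowed
 * (XFRM_LOOKUP_QUEUE set, larval drop disabled), it is marked
 * DST_XFRM_QUEUE and its output is xdst_queue_output(), so packets wait
 * until the needed SAs are negotiated.
 */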
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
struct xfrm_flo *xflo,
const struct flowi *fl,
int num_xfrms,
u16 family)
{
int err;
struct net_device *dev;
struct dst_entry *dst;
struct dst_entry *dst1;
struct xfrm_dst *xdst;
xdst = xfrm_alloc_dst(net, family);
if (IS_ERR(xdst))
return xdst;
if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
net->xfrm.sysctl_larval_drop ||
num_xfrms <= 0)
return xdst;
dst = xflo->dst_orig;
dst1 = &xdst->u.dst;
dst_hold(dst);
xdst->route = dst;
dst_copy_metrics(dst1, dst);
dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
dst1->flags |= DST_XFRM_QUEUE;
dst1->lastuse = jiffies;
dst1->input = dst_discard;
dst1->output = xdst_queue_output;
dst_hold(dst);
xfrm_dst_set_child(xdst, dst);
xdst->path = dst;
xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
err = -ENODEV;
dev = dst->dev;
if (!dev)
goto free_dst;
err = xfrm_fill_dst(xdst, dev, fl);
if (err)
goto free_dst;
out:
return xdst;
free_dst:
dst_release(dst1);
xdst = ERR_PTR(err);
goto out;
}
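/* Look up the policies for @fl and resolve them into a bundle.  If the
 * policies exist but their states do not, fall back to a dummy queueing
 * bundle so traffic can be held while the SAs are negotiated.
 */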
static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
const struct flowi *fl,
u16 family, u8 dir,
struct xfrm_flo *xflo, u32 if_id)
{
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols = 0, num_xfrms = 0, err;
struct xfrm_dst *xdst;
/* Resolve policies to use if we couldn't get them from
* previous cache entry */
num_pols = 1;
pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
goto inc_error;
if (num_pols == 0)
return NULL;
if (num_xfrms <= 0)
goto make_dummy_bundle;
xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
xflo->dst_orig);
if (IS_ERR(xdst)) {
err = PTR_ERR(xdst);
if (err == -EREMOTE) {
xfrm_pols_put(pols, num_pols);
return NULL;
}
if (err != -EAGAIN)
goto error;
goto make_dummy_bundle;
} else if (xdst == NULL) {
num_xfrms = 0;
goto make_dummy_bundle;
}
return xdst;
make_dummy_bundle:
	/* We found policies, but there are no bundles to instantiate:
	 * either the policy blocks, it has no transformations, or we
	 * could not build a template (no xfrm_states). */
xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
if (IS_ERR(xdst)) {
xfrm_pols_put(pols, num_pols);
return ERR_CAST(xdst);
}
xdst->num_pols = num_pols;
xdst->num_xfrms = num_xfrms;
memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
return xdst;
inc_error:
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
xfrm_pols_put(pols, num_pols);
return ERR_PTR(err);
}
static struct dst_entry *make_blackhole(struct net *net, u16 family,
struct dst_entry *dst_orig)
{
const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
struct dst_entry *ret;
if (!afinfo) {
dst_release(dst_orig);
return ERR_PTR(-EINVAL);
} else {
ret = afinfo->blackhole_route(net, dst_orig);
}
rcu_read_unlock();
return ret;
}
/* Finds/creates a bundle for a given flow and if_id.
 *
 * At the moment we eat a raw IP route, mostly to speed up lookups
 * on interfaces with IPsec disabled.
 *
 * xfrm_lookup() uses an if_id of 0 by default and is provided for
 * compatibility.
 */
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
const struct sock *sk,
int flags, u32 if_id)
{
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
struct xfrm_dst *xdst;
struct dst_entry *dst, *route;
u16 family = dst_orig->ops->family;
u8 dir = XFRM_POLICY_OUT;
int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
dst = NULL;
xdst = NULL;
route = NULL;
sk = sk_const_to_full_sk(sk);
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
num_pols = 1;
pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
if_id);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
goto dropdst;
if (num_pols) {
if (num_xfrms <= 0) {
drop_pols = num_pols;
goto no_transform;
}
xdst = xfrm_resolve_and_create_bundle(
pols, num_pols, fl,
family, dst_orig);
if (IS_ERR(xdst)) {
xfrm_pols_put(pols, num_pols);
err = PTR_ERR(xdst);
if (err == -EREMOTE)
goto nopol;
goto dropdst;
} else if (xdst == NULL) {
num_xfrms = 0;
drop_pols = num_pols;
goto no_transform;
}
route = xdst->route;
}
}
if (xdst == NULL) {
struct xfrm_flo xflo;
xflo.dst_orig = dst_orig;
xflo.flags = flags;
/* To accelerate a bit... */
if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
!net->xfrm.policy_count[XFRM_POLICY_OUT]))
goto nopol;
xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
if (xdst == NULL)
goto nopol;
if (IS_ERR(xdst)) {
err = PTR_ERR(xdst);
goto dropdst;
}
num_pols = xdst->num_pols;
num_xfrms = xdst->num_xfrms;
memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
route = xdst->route;
}
dst = &xdst->u.dst;
if (route == NULL && num_xfrms > 0) {
		/* The only case in which xfrm_bundle_lookup() returns a
		 * bundle with a null route is when the template could
		 * not be resolved.  It means the policies are there, but
		 * the bundle could not be created, since we don't yet
		 * have the xfrm_states.  We need to wait for the KM to
		 * negotiate new SAs or bail out with an error. */
if (net->xfrm.sysctl_larval_drop) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
err = -EREMOTE;
goto error;
}
err = -EAGAIN;
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
goto error;
}
no_transform:
if (num_pols == 0)
goto nopol;
if ((flags & XFRM_LOOKUP_ICMP) &&
!(pols[0]->flags & XFRM_POLICY_ICMP)) {
err = -ENOENT;
goto error;
}
for (i = 0; i < num_pols; i++)
pols[i]->curlft.use_time = ktime_get_real_seconds();
if (num_xfrms < 0) {
/* Prohibit the flow */
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
err = -EPERM;
goto error;
} else if (num_xfrms > 0) {
/* Flow transformed */
dst_release(dst_orig);
} else {
/* Flow passes untransformed */
dst_release(dst);
dst = dst_orig;
}
ok:
xfrm_pols_put(pols, drop_pols);
if (dst && dst->xfrm &&
dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
dst->flags |= DST_XFRM_TUNNEL;
return dst;
nopol:
if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
err = -EPERM;
goto error;
}
if (!(flags & XFRM_LOOKUP_ICMP)) {
dst = dst_orig;
goto ok;
}
err = -ENOENT;
error:
dst_release(dst);
dropdst:
if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
dst_release(dst_orig);
xfrm_pols_put(pols, drop_pols);
return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup_with_ifid);
/* Main function: finds/creates a bundle for a given flow.
 *
 * At the moment we eat a raw IP route, mostly to speed up lookups
 * on interfaces with IPsec disabled.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags)
{
return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
}
EXPORT_SYMBOL(xfrm_lookup);
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
* Otherwise we may send out blackholed packets.
*/
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl,
const struct sock *sk, int flags)
{
struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
flags | XFRM_LOOKUP_QUEUE |
XFRM_LOOKUP_KEEP_DST_REF);
if (PTR_ERR(dst) == -EREMOTE)
return make_blackhole(net, dst_orig->ops->family, dst_orig);
if (IS_ERR(dst))
dst_release(dst_orig);
return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
struct sec_path *sp = skb_sec_path(skb);
struct xfrm_state *x;
if (!sp || idx < 0 || idx >= sp->len)
return 0;
x = sp->xvec[idx];
if (!x->type->reject)
return 0;
return x->type->reject(x, skb, fl);
}
/* When the skb is transformed back to its "native" form, we have to
 * check policy restrictions.  At the moment we do this in a maximally
 * stupid way.  Shame on me. :-)  Of course, connected sockets must
 * have the policy cached at them.
 */
static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
unsigned short family, u32 if_id)
{
if (xfrm_state_kern(x))
return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
return x->id.proto == tmpl->id.proto &&
(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
x->props.mode == tmpl->mode &&
(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
!(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
!(x->props.mode != XFRM_MODE_TRANSPORT &&
xfrm_state_addr_cmp(tmpl, x, family)) &&
(if_id == 0 || if_id == x->if_id);
}
/*
 * 0 or more is returned when validation succeeds (either a bypass because
 * of an optional transport-mode template, or the next index of the secpath
 * state that matched the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
unsigned short family, u32 if_id)
{
int idx = start;
if (tmpl->optional) {
if (tmpl->mode == XFRM_MODE_TRANSPORT)
return start;
} else
start = -1;
for (; idx < sp->len; idx++) {
if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
return ++idx;
if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
if (idx < sp->verified_cnt) {
/* Secpath entry previously verified, consider optional and
* continue searching
*/
continue;
}
if (start == -1)
start = -2-idx;
break;
}
}
return start;
}
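/* Build a flowi4 from the packet headers: addresses, protocol and, when
 * available, ports or ICMP/GRE keying info.  With @reverse set the source
 * and destination are swapped.  decode_session6() below is the IPv6
 * counterpart.
 */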
static void
decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
const struct iphdr *iph = ip_hdr(skb);
int ihl = iph->ihl;
u8 *xprth = skb_network_header(skb) + ihl * 4;
struct flowi4 *fl4 = &fl->u.ip4;
int oif = 0;
if (skb_dst(skb) && skb_dst(skb)->dev)
oif = skb_dst(skb)->dev->ifindex;
memset(fl4, 0, sizeof(struct flowi4));
fl4->flowi4_mark = skb->mark;
fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
fl4->flowi4_proto = iph->protocol;
fl4->daddr = reverse ? iph->saddr : iph->daddr;
fl4->saddr = reverse ? iph->daddr : iph->saddr;
fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
if (!ip_is_fragment(iph)) {
switch (iph->protocol) {
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_TCP:
case IPPROTO_SCTP:
case IPPROTO_DCCP:
if (xprth + 4 < skb->data ||
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ports;
xprth = skb_network_header(skb) + ihl * 4;
ports = (__be16 *)xprth;
fl4->fl4_sport = ports[!!reverse];
fl4->fl4_dport = ports[!reverse];
}
break;
case IPPROTO_ICMP:
if (xprth + 2 < skb->data ||
pskb_may_pull(skb, xprth + 2 - skb->data)) {
u8 *icmp;
xprth = skb_network_header(skb) + ihl * 4;
icmp = xprth;
fl4->fl4_icmp_type = icmp[0];
fl4->fl4_icmp_code = icmp[1];
}
break;
case IPPROTO_GRE:
if (xprth + 12 < skb->data ||
pskb_may_pull(skb, xprth + 12 - skb->data)) {
__be16 *greflags;
__be32 *gre_hdr;
xprth = skb_network_header(skb) + ihl * 4;
greflags = (__be16 *)xprth;
gre_hdr = (__be32 *)xprth;
if (greflags[0] & GRE_KEY) {
if (greflags[0] & GRE_CSUM)
gre_hdr++;
fl4->fl4_gre_key = gre_hdr[1];
}
}
break;
default:
break;
}
}
}
#if IS_ENABLED(CONFIG_IPV6)
static void
decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
struct flowi6 *fl6 = &fl->u.ip6;
int onlyproto = 0;
const struct ipv6hdr *hdr = ipv6_hdr(skb);
u32 offset = sizeof(*hdr);
struct ipv6_opt_hdr *exthdr;
const unsigned char *nh = skb_network_header(skb);
u16 nhoff = IP6CB(skb)->nhoff;
int oif = 0;
u8 nexthdr;
if (!nhoff)
nhoff = offsetof(struct ipv6hdr, nexthdr);
nexthdr = nh[nhoff];
if (skb_dst(skb) && skb_dst(skb)->dev)
oif = skb_dst(skb)->dev->ifindex;
memset(fl6, 0, sizeof(struct flowi6));
fl6->flowi6_mark = skb->mark;
fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
while (nh + offset + sizeof(*exthdr) < skb->data ||
pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
nh = skb_network_header(skb);
exthdr = (struct ipv6_opt_hdr *)(nh + offset);
switch (nexthdr) {
case NEXTHDR_FRAGMENT:
onlyproto = 1;
fallthrough;
case NEXTHDR_ROUTING:
case NEXTHDR_HOP:
case NEXTHDR_DEST:
offset += ipv6_optlen(exthdr);
nexthdr = exthdr->nexthdr;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_TCP:
case IPPROTO_SCTP:
case IPPROTO_DCCP:
if (!onlyproto && (nh + offset + 4 < skb->data ||
pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
__be16 *ports;
nh = skb_network_header(skb);
ports = (__be16 *)(nh + offset);
fl6->fl6_sport = ports[!!reverse];
fl6->fl6_dport = ports[!reverse];
}
fl6->flowi6_proto = nexthdr;
return;
case IPPROTO_ICMPV6:
if (!onlyproto && (nh + offset + 2 < skb->data ||
pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
u8 *icmp;
nh = skb_network_header(skb);
icmp = (u8 *)(nh + offset);
fl6->fl6_icmp_type = icmp[0];
fl6->fl6_icmp_code = icmp[1];
}
fl6->flowi6_proto = nexthdr;
return;
case IPPROTO_GRE:
if (!onlyproto &&
(nh + offset + 12 < skb->data ||
pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
struct gre_base_hdr *gre_hdr;
__be32 *gre_key;
nh = skb_network_header(skb);
gre_hdr = (struct gre_base_hdr *)(nh + offset);
gre_key = (__be32 *)(gre_hdr + 1);
if (gre_hdr->flags & GRE_KEY) {
if (gre_hdr->flags & GRE_CSUM)
gre_key++;
fl6->fl6_gre_key = *gre_key;
}
}
fl6->flowi6_proto = nexthdr;
return;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_MH:
offset += ipv6_optlen(exthdr);
if (!onlyproto && (nh + offset + 3 < skb->data ||
pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
struct ip6_mh *mh;
nh = skb_network_header(skb);
mh = (struct ip6_mh *)(nh + offset);
fl6->fl6_mh_type = mh->ip6mh_type;
}
fl6->flowi6_proto = nexthdr;
return;
#endif
default:
fl6->flowi6_proto = nexthdr;
return;
}
}
}
#endif
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
unsigned int family, int reverse)
{
switch (family) {
case AF_INET:
decode_session4(skb, fl, reverse);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
decode_session6(skb, fl, reverse);
break;
#endif
default:
return -EAFNOSUPPORT;
}
return security_xfrm_decode_session(skb, &fl->flowi_secid);
}
EXPORT_SYMBOL(__xfrm_decode_session);
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
for (; k < sp->len; k++) {
if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
*idxp = k;
return 1;
}
}
return 0;
}
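/* Inbound policy check: decode the flow, verify the SAs used against their
 * selectors, look up the applicable socket and/or global policies and
 * match the secpath against their templates.  Returns 1 if the packet is
 * acceptable, 0 otherwise.
 */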
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
unsigned short family)
{
struct net *net = dev_net(skb->dev);
struct xfrm_policy *pol;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int npols = 0;
int xfrm_nr;
int pi;
int reverse;
struct flowi fl;
int xerr_idx = -1;
const struct xfrm_if_cb *ifcb;
struct sec_path *sp;
u32 if_id = 0;
rcu_read_lock();
ifcb = xfrm_if_get_cb();
if (ifcb) {
struct xfrm_if_decode_session_result r;
if (ifcb->decode_session(skb, family, &r)) {
if_id = r.if_id;
net = r.net;
}
}
rcu_read_unlock();
reverse = dir & ~XFRM_POLICY_MASK;
dir &= XFRM_POLICY_MASK;
if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
return 0;
}
nf_nat_decode_session(skb, &fl, family);
/* First, check used SA against their selectors. */
sp = skb_sec_path(skb);
if (sp) {
int i;
for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
if (!xfrm_selector_match(&x->sel, &fl, family)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
return 0;
}
}
}
pol = NULL;
sk = sk_to_full_sk(sk);
if (sk && sk->sk_policy[dir]) {
pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
}
}
if (!pol)
pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
}
if (!pol) {
if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
return 0;
}
if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
xfrm_secpath_reject(xerr_idx, skb, &fl);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
return 0;
}
return 1;
}
/* This lockless write can happen from different cpus. */
WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
pols[0] = pol;
npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
&fl, family,
XFRM_POLICY_IN, if_id);
if (pols[1]) {
if (IS_ERR(pols[1])) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
xfrm_pol_put(pols[0]);
return 0;
}
/* This write can happen from different cpus. */
WRITE_ONCE(pols[1]->curlft.use_time,
ktime_get_real_seconds());
npols++;
}
}
#endif
if (pol->action == XFRM_POLICY_ALLOW) {
static struct sec_path dummy;
struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
struct xfrm_tmpl **tpp = tp;
int ti = 0;
int i, k;
sp = skb_sec_path(skb);
if (!sp)
sp = &dummy;
for (pi = 0; pi < npols; pi++) {
if (pols[pi] != pol &&
pols[pi]->action != XFRM_POLICY_ALLOW) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
goto reject;
}
if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto reject_error;
}
for (i = 0; i < pols[pi]->xfrm_nr; i++)
tpp[ti++] = &pols[pi]->xfrm_vec[i];
}
xfrm_nr = ti;
if (npols > 1) {
xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
tpp = stp;
}
/* For each tunnel xfrm, find the first matching tmpl.
* For each tmpl before that, find corresponding xfrm.
 * Order is _important_.  Later we will implement some barriers,
 * but at the moment barriers are implied between every two
 * transformations.
 * Upon success, the secpath entries are marked as verified so that
 * they can be skipped in future policy checks (e.g. nested tunnels).
*/
for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
if (k < 0) {
if (k < -1)
/* "-2 - errored_index" returned */
xerr_idx = -(2+k);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
goto reject;
}
}
if (secpath_has_nontransport(sp, k, &xerr_idx)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
goto reject;
}
xfrm_pols_put(pols, npols);
sp->verified_cnt = k;
return 1;
}
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
reject:
xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
xfrm_pols_put(pols, npols);
return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
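/* Resolve an xfrm bundle for a forwarded packet and attach it as the
 * skb's dst.  Returns 1 on success, 0 if decoding the flow or the
 * bundle lookup fails.
 */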
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
struct net *net = dev_net(skb->dev);
struct flowi fl;
struct dst_entry *dst;
int res = 1;
if (xfrm_decode_session(skb, &fl, family) < 0) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
return 0;
}
skb_dst_force(skb);
if (!skb_dst(skb)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
return 0;
}
dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
if (IS_ERR(dst)) {
res = 0;
dst = NULL;
}
skb_dst_set(skb, dst);
return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
* to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
* get validated by dst_ops->check on every use. We do this
 * because when a normal route referenced by an XFRM dst is
 * obsoleted, we do not go looking for all of the parent XFRM dsts
 * that reference it in order to invalidate them.  That is just
 * too much work.  Instead we make the checks here on
* every use. For example:
*
* XFRM dst A --> IPv4 dst X
*
* X is the "xdst->route" of A (X is also the "dst->path" of A
* in this example). If X is marked obsolete, "A" will not
* notice. That's what we are validating here via the
* stale_bundle() check.
*
* When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
* be marked on it.
* This will force stale_bundle() to fail on any xdst bundle with
* this dst linked in it.
*/
if (dst->obsolete < 0 && !stale_bundle(dst))
return dst;
return NULL;
}
static int stale_bundle(struct dst_entry *dst)
{
return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
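/* A device is going away: re-point every xfrm dst in the chain that
 * still references it at the blackhole device.
 */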
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
dst->dev = blackhole_netdev;
dev_hold(dst->dev);
dev_put(dev);
}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
/* Impossible. Such dst must be popped before it reaches the point of failure. */
}
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
if (dst) {
if (dst->obsolete) {
dst_release(dst);
dst = NULL;
}
}
return dst;
}
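/* Initialize the PMTU values cached in each entry of a freshly built
 * bundle: cache the child and route MTUs, account for the state's
 * overhead via xfrm_state_mtu(), and clamp RTAX_MTU to the route MTU.
 */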
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
while (nr--) {
struct xfrm_dst *xdst = bundle[nr];
u32 pmtu, route_mtu_cached;
struct dst_entry *dst;
dst = &xdst->u.dst;
pmtu = dst_mtu(xfrm_dst_child(dst));
xdst->child_mtu_cached = pmtu;
pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
route_mtu_cached = dst_mtu(xdst->route);
xdst->route_mtu_cached = route_mtu_cached;
if (pmtu > route_mtu_cached)
pmtu = route_mtu_cached;
dst_metric_set(dst, RTAX_MTU, pmtu);
}
}
/* Check that the bundle accepts the flow and its components are
* still valid.
*/
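/* Besides validating the states and the cached generation ids, the
 * walk below tracks MTU changes: if a child or route MTU has changed,
 * the new value is re-propagated from that entry back out towards the
 * outermost dst.
 */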
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
struct dst_entry *dst = &first->u.dst;
struct xfrm_dst *xdst;
int start_from, nr;
u32 mtu;
if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
(dst->dev && !netif_running(dst->dev)))
return 0;
if (dst->flags & DST_XFRM_QUEUE)
return 1;
start_from = nr = 0;
do {
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
if (dst->xfrm->km.state != XFRM_STATE_VALID)
return 0;
if (xdst->xfrm_genid != dst->xfrm->genid)
return 0;
if (xdst->num_pols > 0 &&
xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
return 0;
bundle[nr++] = xdst;
mtu = dst_mtu(xfrm_dst_child(dst));
if (xdst->child_mtu_cached != mtu) {
start_from = nr;
xdst->child_mtu_cached = mtu;
}
if (!dst_check(xdst->route, xdst->route_cookie))
return 0;
mtu = dst_mtu(xdst->route);
if (xdst->route_mtu_cached != mtu) {
start_from = nr;
xdst->route_mtu_cached = mtu;
}
dst = xfrm_dst_child(dst);
} while (dst->xfrm);
if (likely(!start_from))
return 1;
xdst = bundle[start_from - 1];
mtu = xdst->child_mtu_cached;
while (start_from--) {
dst = &xdst->u.dst;
mtu = xfrm_state_mtu(dst->xfrm, mtu);
if (mtu > xdst->route_mtu_cached)
mtu = xdst->route_mtu_cached;
dst_metric_set(dst, RTAX_MTU, mtu);
if (!start_from)
break;
xdst = bundle[start_from - 1];
xdst->child_mtu_cached = mtu;
}
return 1;
}
static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
return dst_metric_advmss(xfrm_dst_path(dst));
}
static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
return mtu ? : dst_mtu(xfrm_dst_path(dst));
}
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
const void *daddr)
{
while (dst->xfrm) {
const struct xfrm_state *xfrm = dst->xfrm;
dst = xfrm_dst_child(dst);
if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
continue;
if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
daddr = xfrm->coaddr;
else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
daddr = &xfrm->id.daddr;
}
return daddr;
}
static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
const struct dst_entry *path = xfrm_dst_path(dst);
if (!skb)
daddr = xfrm_get_dst_nexthop(dst, daddr);
return path->ops->neigh_lookup(path, skb, daddr);
}
static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
const struct dst_entry *path = xfrm_dst_path(dst);
daddr = xfrm_get_dst_nexthop(dst, daddr);
path->ops->confirm_neigh(path, daddr);
}
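/* Register the per-family dst_ops hooks, filling in the generic xfrm
 * implementations only for the callbacks the caller left NULL.
 */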
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
int err = 0;
if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
return -EAFNOSUPPORT;
spin_lock(&xfrm_policy_afinfo_lock);
if (unlikely(xfrm_policy_afinfo[family] != NULL))
err = -EEXIST;
else {
struct dst_ops *dst_ops = afinfo->dst_ops;
if (likely(dst_ops->kmem_cachep == NULL))
dst_ops->kmem_cachep = xfrm_dst_cache;
if (likely(dst_ops->check == NULL))
dst_ops->check = xfrm_dst_check;
if (likely(dst_ops->default_advmss == NULL))
dst_ops->default_advmss = xfrm_default_advmss;
if (likely(dst_ops->mtu == NULL))
dst_ops->mtu = xfrm_mtu;
if (likely(dst_ops->negative_advice == NULL))
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))
dst_ops->link_failure = xfrm_link_failure;
if (likely(dst_ops->neigh_lookup == NULL))
dst_ops->neigh_lookup = xfrm_neigh_lookup;
if (likely(!dst_ops->confirm_neigh))
dst_ops->confirm_neigh = xfrm_confirm_neigh;
rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
}
spin_unlock(&xfrm_policy_afinfo_lock);
return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
struct dst_ops *dst_ops = afinfo->dst_ops;
int i;
for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
if (xfrm_policy_afinfo[i] != afinfo)
continue;
RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
break;
}
synchronize_rcu();
dst_ops->kmem_cachep = NULL;
dst_ops->check = NULL;
dst_ops->negative_advice = NULL;
dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
spin_lock(&xfrm_if_cb_lock);
rcu_assign_pointer(xfrm_if_cb, ifcb);
spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);
void xfrm_if_unregister_cb(void)
{
RCU_INIT_POINTER(xfrm_if_cb, NULL);
synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
int rv;
net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
if (!net->mib.xfrm_statistics)
return -ENOMEM;
rv = xfrm_proc_init(net);
if (rv < 0)
free_percpu(net->mib.xfrm_statistics);
return rv;
}
static void xfrm_statistics_fini(struct net *net)
{
xfrm_proc_fini(net);
free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
return 0;
}
static void xfrm_statistics_fini(struct net *net)
{
}
#endif
static int __net_init xfrm_policy_init(struct net *net)
{
unsigned int hmask, sz;
int dir, err;
if (net_eq(net, &init_net)) {
xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
sizeof(struct xfrm_dst),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
err = rhashtable_init(&xfrm_policy_inexact_table,
&xfrm_pol_inexact_params);
BUG_ON(err);
}
hmask = 8 - 1;
sz = (hmask+1) * sizeof(struct hlist_head);
net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
if (!net->xfrm.policy_byidx)
goto out_byidx;
net->xfrm.policy_idx_hmask = hmask;
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy_hash *htab;
net->xfrm.policy_count[dir] = 0;
net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
htab = &net->xfrm.policy_bydst[dir];
htab->table = xfrm_hash_alloc(sz);
if (!htab->table)
goto out_bydst;
htab->hmask = hmask;
htab->dbits4 = 32;
htab->sbits4 = 32;
htab->dbits6 = 128;
htab->sbits6 = 128;
}
net->xfrm.policy_hthresh.lbits4 = 32;
net->xfrm.policy_hthresh.rbits4 = 32;
net->xfrm.policy_hthresh.lbits6 = 128;
net->xfrm.policy_hthresh.rbits6 = 128;
seqlock_init(&net->xfrm.policy_hthresh.lock);
INIT_LIST_HEAD(&net->xfrm.policy_all);
INIT_LIST_HEAD(&net->xfrm.inexact_bins);
INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
return 0;
out_bydst:
for (dir--; dir >= 0; dir--) {
struct xfrm_policy_hash *htab;
htab = &net->xfrm.policy_bydst[dir];
xfrm_hash_free(htab->table, sz);
}
xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
return -ENOMEM;
}
static void xfrm_policy_fini(struct net *net)
{
struct xfrm_pol_inexact_bin *b, *t;
unsigned int sz;
int dir;
flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
WARN_ON(!list_empty(&net->xfrm.policy_all));
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy_hash *htab;
WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
htab = &net->xfrm.policy_bydst[dir];
sz = (htab->hmask + 1) * sizeof(struct hlist_head);
WARN_ON(!hlist_empty(htab->table));
xfrm_hash_free(htab->table, sz);
}
sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
xfrm_hash_free(net->xfrm.policy_byidx, sz);
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
__xfrm_policy_inexact_prune_bin(b, true);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
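/* Per-netns initialization: locks, default policies, statistics,
 * state, policy and sysctl, unwound in reverse order on failure.
 */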
static int __net_init xfrm_net_init(struct net *net)
{
int rv;
/* Initialize the per-net locks here */
spin_lock_init(&net->xfrm.xfrm_state_lock);
spin_lock_init(&net->xfrm.xfrm_policy_lock);
seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
mutex_init(&net->xfrm.xfrm_cfg_mutex);
net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
rv = xfrm_statistics_init(net);
if (rv < 0)
goto out_statistics;
rv = xfrm_state_init(net);
if (rv < 0)
goto out_state;
rv = xfrm_policy_init(net);
if (rv < 0)
goto out_policy;
rv = xfrm_sysctl_init(net);
if (rv < 0)
goto out_sysctl;
return 0;
out_sysctl:
xfrm_policy_fini(net);
out_policy:
xfrm_state_fini(net);
out_state:
xfrm_statistics_fini(net);
out_statistics:
return rv;
}
static void __net_exit xfrm_net_exit(struct net *net)
{
xfrm_sysctl_fini(net);
xfrm_policy_fini(net);
xfrm_state_fini(net);
xfrm_statistics_fini(net);
}
static struct pernet_operations __net_initdata xfrm_net_ops = {
.init = xfrm_net_init,
.exit = xfrm_net_exit,
};
void __init xfrm_init(void)
{
register_pernet_subsys(&xfrm_net_ops);
xfrm_dev_init();
xfrm_input_init();
#ifdef CONFIG_XFRM_ESPINTCP
espintcp_init();
#endif
}
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
struct audit_buffer *audit_buf)
{
struct xfrm_sec_ctx *ctx = xp->security;
struct xfrm_selector *sel = &xp->selector;
if (ctx)
audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
switch (sel->family) {
case AF_INET:
audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
if (sel->prefixlen_s != 32)
audit_log_format(audit_buf, " src_prefixlen=%d",
sel->prefixlen_s);
audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
if (sel->prefixlen_d != 32)
audit_log_format(audit_buf, " dst_prefixlen=%d",
sel->prefixlen_d);
break;
case AF_INET6:
audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
if (sel->prefixlen_s != 128)
audit_log_format(audit_buf, " src_prefixlen=%d",
sel->prefixlen_s);
audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
if (sel->prefixlen_d != 128)
audit_log_format(audit_buf, " dst_prefixlen=%d",
sel->prefixlen_d);
break;
}
}
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SPD-add");
if (audit_buf == NULL)
return;
xfrm_audit_helper_usrinfo(task_valid, audit_buf);
audit_log_format(audit_buf, " res=%u", result);
xfrm_audit_common_policyinfo(xp, audit_buf);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
bool task_valid)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SPD-delete");
if (audit_buf == NULL)
return;
xfrm_audit_helper_usrinfo(task_valid, audit_buf);
audit_log_format(audit_buf, " res=%u", result);
xfrm_audit_common_policyinfo(xp, audit_buf);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
const struct xfrm_selector *sel_tgt)
{
if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
if (sel_tgt->family == sel_cmp->family &&
xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
sel_cmp->family) &&
xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
sel_cmp->family) &&
sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
return true;
}
} else {
if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
return true;
}
}
return false;
}
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
u8 dir, u8 type, struct net *net, u32 if_id)
{
struct xfrm_policy *pol, *ret = NULL;
struct hlist_head *chain;
u32 priority = ~0U;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
hlist_for_each_entry(pol, chain, bydst) {
if ((if_id == 0 || pol->if_id == if_id) &&
xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
ret = pol;
priority = ret->priority;
break;
}
}
chain = &net->xfrm.policy_inexact[dir];
hlist_for_each_entry(pol, chain, bydst_inexact_list) {
if ((pol->priority >= priority) && ret)
break;
if ((if_id == 0 || pol->if_id == if_id) &&
xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
ret = pol;
break;
}
}
xfrm_pol_hold(ret);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return ret;
}
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
int match = 0;
if (t->mode == m->mode && t->id.proto == m->proto &&
(m->reqid == 0 || t->reqid == m->reqid)) {
switch (t->mode) {
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
m->old_family) &&
xfrm_addr_equal(&t->saddr, &m->old_saddr,
m->old_family)) {
match = 1;
}
break;
case XFRM_MODE_TRANSPORT:
/* In transport mode the template does not store any IP
   addresses, hence we just compare mode and protocol. */
match = 1;
break;
default:
break;
}
}
return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
struct xfrm_migrate *m, int num_migrate,
struct netlink_ext_ack *extack)
{
struct xfrm_migrate *mp;
int i, j, n = 0;
write_lock_bh(&pol->lock);
if (unlikely(pol->walk.dead)) {
/* target policy has been deleted */
NL_SET_ERR_MSG(extack, "Target policy not found");
write_unlock_bh(&pol->lock);
return -ENOENT;
}
for (i = 0; i < pol->xfrm_nr; i++) {
for (j = 0, mp = m; j < num_migrate; j++, mp++) {
if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
continue;
n++;
if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
continue;
/* update endpoints */
memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
sizeof(pol->xfrm_vec[i].id.daddr));
memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
sizeof(pol->xfrm_vec[i].saddr));
pol->xfrm_vec[i].encap_family = mp->new_family;
/* flush bundles */
atomic_inc(&pol->genid);
}
}
write_unlock_bh(&pol->lock);
if (!n)
return -ENODATA;
return 0;
}
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
struct netlink_ext_ack *extack)
{
int i, j;
if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
return -EINVAL;
}
for (i = 0; i < num_migrate; i++) {
if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
return -EINVAL;
}
/* check if there is any duplicated entry */
for (j = i + 1; j < num_migrate; j++) {
if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
sizeof(m[i].old_daddr)) &&
!memcmp(&m[i].old_saddr, &m[j].old_saddr,
sizeof(m[i].old_saddr)) &&
m[i].proto == m[j].proto &&
m[i].mode == m[j].mode &&
m[i].reqid == m[j].reqid &&
m[i].old_family == m[j].old_family) {
NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
return -EINVAL;
}
}
}
return 0;
}
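/* Migrate the endpoints of a policy and its states in stages:
 * sanity-check the migrate list, find the target policy, clone and
 * update the matching states, rewrite the policy templates, then
 * delete the old states and notify the key managers.
 */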
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_migrate,
struct xfrm_kmaddress *k, struct net *net,
struct xfrm_encap_tmpl *encap, u32 if_id,
struct netlink_ext_ack *extack)
{
int i, err, nx_cur = 0, nx_new = 0;
struct xfrm_policy *pol = NULL;
struct xfrm_state *x, *xc;
struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
struct xfrm_state *x_new[XFRM_MAX_DEPTH];
struct xfrm_migrate *mp;
/* Stage 0 - sanity checks */
err = xfrm_migrate_check(m, num_migrate, extack);
if (err < 0)
goto out;
if (dir >= XFRM_POLICY_MAX) {
NL_SET_ERR_MSG(extack, "Invalid policy direction");
err = -EINVAL;
goto out;
}
/* Stage 1 - find policy */
pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
if (!pol) {
NL_SET_ERR_MSG(extack, "Target policy not found");
err = -ENOENT;
goto out;
}
/* Stage 2 - find and update state(s) */
for (i = 0, mp = m; i < num_migrate; i++, mp++) {
if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
x_cur[nx_cur] = x;
nx_cur++;
xc = xfrm_state_migrate(x, mp, encap);
if (xc) {
x_new[nx_new] = xc;
nx_new++;
} else {
err = -ENODATA;
goto restore_state;
}
}
}
/* Stage 3 - update policy */
err = xfrm_policy_migrate(pol, m, num_migrate, extack);
if (err < 0)
goto restore_state;
/* Stage 4 - delete old state(s) */
if (nx_cur) {
xfrm_states_put(x_cur, nx_cur);
xfrm_states_delete(x_cur, nx_cur);
}
/* Stage 5 - announce */
km_migrate(sel, dir, type, m, num_migrate, k, encap);
xfrm_pol_put(pol);
return 0;
out:
return err;
restore_state:
if (pol)
xfrm_pol_put(pol);
if (nx_cur)
xfrm_states_put(x_cur, nx_cur);
if (nx_new)
xfrm_states_delete(x_new, nx_new);
return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif
| linux-master | net/xfrm/xfrm_policy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/net/sunrpc/timer.c
*
* Estimate RPC request round trip time.
*
* Based on packet round-trip and variance estimator algorithms described
* in appendix A of "Congestion Avoidance and Control" by Van Jacobson
* and Michael J. Karels (ACM Computer Communication Review; Proceedings
* of the Sigcomm '88 Symposium in Stanford, CA, August, 1988).
*
* This RTT estimator is used only for RPC over datagram protocols.
*
* Copyright (C) 2002 Trond Myklebust <[email protected]>
*/
#include <asm/param.h>
#include <linux/types.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/sunrpc/clnt.h>
#define RPC_RTO_MAX (60*HZ)
#define RPC_RTO_INIT (HZ/5)
#define RPC_RTO_MIN (HZ/10)
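/* The estimator below keeps its state in scaled fixed point, much like
 * TCP's RTT estimator: srtt[] holds 8 * SRTT and sdrtt[] holds
 * 4 * MDEV, so the exponential averages can be updated with shifts
 * only.  rpc_calc_rto() then returns roughly SRTT + 4 * MDEV, capped
 * at RPC_RTO_MAX; the lower bound enforced on sdrtt keeps the result
 * at or above RPC_RTO_MIN.
 */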
/**
* rpc_init_rtt - Initialize an RPC RTT estimator context
* @rt: context to initialize
* @timeo: initial timeout value, in jiffies
*
*/
void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
{
unsigned long init = 0;
unsigned int i;
rt->timeo = timeo;
if (timeo > RPC_RTO_INIT)
init = (timeo - RPC_RTO_INIT) << 3;
for (i = 0; i < 5; i++) {
rt->srtt[i] = init;
rt->sdrtt[i] = RPC_RTO_INIT;
rt->ntimeouts[i] = 0;
}
}
EXPORT_SYMBOL_GPL(rpc_init_rtt);
/**
* rpc_update_rtt - Update an RPC RTT estimator context
* @rt: context to update
* @timer: timer array index (request type)
* @m: recent actual RTT, in jiffies
*
* NB: When computing the smoothed RTT and standard deviation,
* be careful not to produce negative intermediate results.
*/
void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m)
{
long *srtt, *sdrtt;
if (timer-- == 0)
return;
/* jiffies wrapped; ignore this one */
if (m < 0)
return;
if (m == 0)
m = 1L;
srtt = (long *)&rt->srtt[timer];
m -= *srtt >> 3;
*srtt += m;
if (m < 0)
m = -m;
sdrtt = (long *)&rt->sdrtt[timer];
m -= *sdrtt >> 2;
*sdrtt += m;
/* Set lower bound on the variance */
if (*sdrtt < RPC_RTO_MIN)
*sdrtt = RPC_RTO_MIN;
}
EXPORT_SYMBOL_GPL(rpc_update_rtt);
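/* Illustrative example (hypothetical numbers, assuming HZ == 100 so
 * that RPC_RTO_MIN == 10 jiffies): with srtt == 800 (SRTT = 100
 * jiffies) and sdrtt == 40 (MDEV = 10 jiffies), a sample m == 120
 * gives err = 120 - (800 >> 3) = 20, so srtt becomes 820 and sdrtt
 * becomes 40 + (20 - (40 >> 2)) = 50.  rpc_calc_rto() below then
 * returns ((820 + 7) >> 3) + 50 = 153 jiffies.
 */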
/**
* rpc_calc_rto - Provide an estimated timeout value
* @rt: context to use for calculation
* @timer: timer array index (request type)
*
* Estimate RTO for an NFS RPC sent via an unreliable datagram. Use
* the mean and mean deviation of RTT for the appropriate type of RPC
* for frequently issued RPCs, and a fixed default for the others.
*
* The justification for doing "other" this way is that these RPCs
* happen so infrequently that timer estimation would probably be
* stale. Also, since many of these RPCs are non-idempotent, a
* conservative timeout is desired.
*
* getattr, lookup,
* read, write, commit - A+4D
* other - timeo
*/
unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer)
{
unsigned long res;
if (timer-- == 0)
return rt->timeo;
res = ((rt->srtt[timer] + 7) >> 3) + rt->sdrtt[timer];
if (res > RPC_RTO_MAX)
res = RPC_RTO_MAX;
return res;
}
EXPORT_SYMBOL_GPL(rpc_calc_rto);
| linux-master | net/sunrpc/timer.c |