// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/genetlink.h>
#include <uapi/linux/mrp_bridge.h>
#include "br_private.h"
#include "br_private_mrp.h"
static const struct nla_policy br_mrp_policy[IFLA_BRIDGE_MRP_MAX + 1] = {
[IFLA_BRIDGE_MRP_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_INSTANCE] = { .type = NLA_NESTED },
[IFLA_BRIDGE_MRP_PORT_STATE] = { .type = NLA_NESTED },
[IFLA_BRIDGE_MRP_PORT_ROLE] = { .type = NLA_NESTED },
[IFLA_BRIDGE_MRP_RING_STATE] = { .type = NLA_NESTED },
[IFLA_BRIDGE_MRP_RING_ROLE] = { .type = NLA_NESTED },
[IFLA_BRIDGE_MRP_START_TEST] = { .type = NLA_NESTED },
[IFLA_BRIDGE_MRP_IN_ROLE] = { .type = NLA_NESTED },
[IFLA_BRIDGE_MRP_IN_STATE] = { .type = NLA_NESTED },
[IFLA_BRIDGE_MRP_START_IN_TEST] = { .type = NLA_NESTED },
};
static const struct nla_policy
br_mrp_instance_policy[IFLA_BRIDGE_MRP_INSTANCE_MAX + 1] = {
[IFLA_BRIDGE_MRP_INSTANCE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_INSTANCE_RING_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_INSTANCE_PRIO] = { .type = NLA_U16 },
};
static int br_mrp_instance_parse(struct net_bridge *br, struct nlattr *attr,
int cmd, struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_INSTANCE_MAX + 1];
struct br_mrp_instance inst;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_INSTANCE_MAX, attr,
br_mrp_instance_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID] ||
!tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX] ||
!tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]) {
NL_SET_ERR_MSG_MOD(extack,
"Missing attribute: RING_ID or P_IFINDEX or S_IFINDEX");
return -EINVAL;
}
memset(&inst, 0, sizeof(inst));
inst.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID]);
inst.p_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX]);
inst.s_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]);
inst.prio = MRP_DEFAULT_PRIO;
if (tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO])
inst.prio = nla_get_u16(tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO]);
if (cmd == RTM_SETLINK)
return br_mrp_add(br, &inst);
else
return br_mrp_del(br, &inst);
return 0;
}
static const struct nla_policy
br_mrp_port_state_policy[IFLA_BRIDGE_MRP_PORT_STATE_MAX + 1] = {
[IFLA_BRIDGE_MRP_PORT_STATE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_PORT_STATE_STATE] = { .type = NLA_U32 },
};
static int br_mrp_port_state_parse(struct net_bridge_port *p,
struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_PORT_STATE_MAX + 1];
enum br_mrp_port_state_type state;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_PORT_STATE_MAX, attr,
br_mrp_port_state_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_PORT_STATE_STATE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing attribute: STATE");
return -EINVAL;
}
state = nla_get_u32(tb[IFLA_BRIDGE_MRP_PORT_STATE_STATE]);
return br_mrp_set_port_state(p, state);
}
static const struct nla_policy
br_mrp_port_role_policy[IFLA_BRIDGE_MRP_PORT_ROLE_MAX + 1] = {
[IFLA_BRIDGE_MRP_PORT_ROLE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE] = { .type = NLA_U32 },
};
static int br_mrp_port_role_parse(struct net_bridge_port *p,
struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_PORT_ROLE_MAX + 1];
enum br_mrp_port_role_type role;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_PORT_ROLE_MAX, attr,
br_mrp_port_role_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing attribute: ROLE");
return -EINVAL;
}
role = nla_get_u32(tb[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE]);
return br_mrp_set_port_role(p, role);
}
static const struct nla_policy
br_mrp_ring_state_policy[IFLA_BRIDGE_MRP_RING_STATE_MAX + 1] = {
[IFLA_BRIDGE_MRP_RING_STATE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_RING_STATE_RING_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_RING_STATE_STATE] = { .type = NLA_U32 },
};
static int br_mrp_ring_state_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_RING_STATE_MAX + 1];
struct br_mrp_ring_state state;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_RING_STATE_MAX, attr,
br_mrp_ring_state_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_RING_STATE_RING_ID] ||
!tb[IFLA_BRIDGE_MRP_RING_STATE_STATE]) {
NL_SET_ERR_MSG_MOD(extack,
"Missing attribute: RING_ID or STATE");
return -EINVAL;
}
memset(&state, 0x0, sizeof(state));
state.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_STATE_RING_ID]);
state.ring_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_STATE_STATE]);
return br_mrp_set_ring_state(br, &state);
}
static const struct nla_policy
br_mrp_ring_role_policy[IFLA_BRIDGE_MRP_RING_ROLE_MAX + 1] = {
[IFLA_BRIDGE_MRP_RING_ROLE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_RING_ROLE_ROLE] = { .type = NLA_U32 },
};
static int br_mrp_ring_role_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_RING_ROLE_MAX + 1];
struct br_mrp_ring_role role;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_RING_ROLE_MAX, attr,
br_mrp_ring_role_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID] ||
!tb[IFLA_BRIDGE_MRP_RING_ROLE_ROLE]) {
NL_SET_ERR_MSG_MOD(extack,
"Missing attribute: RING_ID or ROLE");
return -EINVAL;
}
memset(&role, 0x0, sizeof(role));
role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID]);
role.ring_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_ROLE_ROLE]);
return br_mrp_set_ring_role(br, &role);
}
static const struct nla_policy
br_mrp_start_test_policy[IFLA_BRIDGE_MRP_START_TEST_MAX + 1] = {
[IFLA_BRIDGE_MRP_START_TEST_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_START_TEST_RING_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_START_TEST_INTERVAL] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_START_TEST_PERIOD] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_START_TEST_MONITOR] = { .type = NLA_U32 },
};
static int br_mrp_start_test_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_START_TEST_MAX + 1];
struct br_mrp_start_test test;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_TEST_MAX, attr,
br_mrp_start_test_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_START_TEST_RING_ID] ||
!tb[IFLA_BRIDGE_MRP_START_TEST_INTERVAL] ||
!tb[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS] ||
!tb[IFLA_BRIDGE_MRP_START_TEST_PERIOD]) {
NL_SET_ERR_MSG_MOD(extack,
"Missing attribute: RING_ID or INTERVAL or MAX_MISS or PERIOD");
return -EINVAL;
}
memset(&test, 0x0, sizeof(test));
test.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_RING_ID]);
test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_INTERVAL]);
test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS]);
test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_PERIOD]);
test.monitor = false;
if (tb[IFLA_BRIDGE_MRP_START_TEST_MONITOR])
test.monitor =
nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_MONITOR]);
return br_mrp_start_test(br, &test);
}
static const struct nla_policy
br_mrp_in_state_policy[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1] = {
[IFLA_BRIDGE_MRP_IN_STATE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_IN_STATE_IN_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_IN_STATE_STATE] = { .type = NLA_U32 },
};
static int br_mrp_in_state_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1];
struct br_mrp_in_state state;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_STATE_MAX, attr,
br_mrp_in_state_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID] ||
!tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]) {
NL_SET_ERR_MSG_MOD(extack,
"Missing attribute: IN_ID or STATE");
return -EINVAL;
}
memset(&state, 0x0, sizeof(state));
state.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID]);
state.in_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]);
return br_mrp_set_in_state(br, &state);
}
static const struct nla_policy
br_mrp_in_role_policy[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1] = {
[IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] = { .type = NLA_U16 },
[IFLA_BRIDGE_MRP_IN_ROLE_ROLE] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] = { .type = NLA_U32 },
};
static int br_mrp_in_role_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1];
struct br_mrp_in_role role;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_ROLE_MAX, attr,
br_mrp_in_role_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] ||
!tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] ||
!tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] ||
!tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]) {
NL_SET_ERR_MSG_MOD(extack,
"Missing attribute: RING_ID or ROLE or IN_ID or I_IFINDEX");
return -EINVAL;
}
memset(&role, 0x0, sizeof(role));
role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]);
role.in_id = nla_get_u16(tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]);
role.i_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]);
role.in_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]);
return br_mrp_set_in_role(br, &role);
}
static const struct nla_policy
br_mrp_start_in_test_policy[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1] = {
[IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] = { .type = NLA_U32 },
[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD] = { .type = NLA_U32 },
};
static int br_mrp_start_in_test_parse(struct net_bridge *br,
struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1];
struct br_mrp_start_in_test test;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_IN_TEST_MAX, attr,
br_mrp_start_in_test_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] ||
!tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] ||
!tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] ||
!tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]) {
NL_SET_ERR_MSG_MOD(extack,
"Missing attribute: RING_ID or INTERVAL or MAX_MISS or PERIOD");
return -EINVAL;
}
memset(&test, 0x0, sizeof(test));
test.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]);
test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL]);
test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS]);
test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]);
return br_mrp_start_in_test(br, &test);
}
int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
struct nlattr *attr, int cmd, struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_MRP_MAX + 1];
int err;
/* When this function is called for a port then the br pointer is
invalid, therefore set br to point to the correct bridge
*/
if (p)
br = p->br;
if (br->stp_enabled != BR_NO_STP) {
NL_SET_ERR_MSG_MOD(extack, "MRP can't be enabled if STP is already enabled");
return -EINVAL;
}
err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_MAX, attr,
br_mrp_policy, extack);
if (err)
return err;
if (tb[IFLA_BRIDGE_MRP_INSTANCE]) {
err = br_mrp_instance_parse(br, tb[IFLA_BRIDGE_MRP_INSTANCE],
cmd, extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_MRP_PORT_STATE]) {
err = br_mrp_port_state_parse(p, tb[IFLA_BRIDGE_MRP_PORT_STATE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_MRP_PORT_ROLE]) {
err = br_mrp_port_role_parse(p, tb[IFLA_BRIDGE_MRP_PORT_ROLE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_MRP_RING_STATE]) {
err = br_mrp_ring_state_parse(br,
tb[IFLA_BRIDGE_MRP_RING_STATE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_MRP_RING_ROLE]) {
err = br_mrp_ring_role_parse(br, tb[IFLA_BRIDGE_MRP_RING_ROLE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_MRP_START_TEST]) {
err = br_mrp_start_test_parse(br,
tb[IFLA_BRIDGE_MRP_START_TEST],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_MRP_IN_STATE]) {
err = br_mrp_in_state_parse(br, tb[IFLA_BRIDGE_MRP_IN_STATE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_MRP_IN_ROLE]) {
err = br_mrp_in_role_parse(br, tb[IFLA_BRIDGE_MRP_IN_ROLE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_MRP_START_IN_TEST]) {
err = br_mrp_start_in_test_parse(br,
tb[IFLA_BRIDGE_MRP_START_IN_TEST],
extack);
if (err)
return err;
}
return 0;
}
int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br)
{
struct nlattr *tb, *mrp_tb;
struct br_mrp *mrp;
mrp_tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP);
if (!mrp_tb)
return -EMSGSIZE;
hlist_for_each_entry_rcu(mrp, &br->mrp_list, list) {
struct net_bridge_port *p;
tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ID,
mrp->ring_id))
goto nla_put_failure;
p = rcu_dereference(mrp->p_port);
if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_P_IFINDEX,
p->dev->ifindex))
goto nla_put_failure;
p = rcu_dereference(mrp->s_port);
if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_S_IFINDEX,
p->dev->ifindex))
goto nla_put_failure;
p = rcu_dereference(mrp->i_port);
if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_I_IFINDEX,
p->dev->ifindex))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BRIDGE_MRP_INFO_PRIO,
mrp->prio))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_STATE,
mrp->ring_state))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ROLE,
mrp->ring_role))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL,
mrp->test_interval))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS,
mrp->test_max_miss))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MONITOR,
mrp->test_monitor))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_STATE,
mrp->in_state))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_ROLE,
mrp->in_role))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL,
mrp->in_test_interval))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS,
mrp->in_test_max_miss))
goto nla_put_failure;
nla_nest_end(skb, tb);
}
nla_nest_end(skb, mrp_tb);
return 0;
nla_put_failure:
nla_nest_cancel(skb, tb);
nla_info_failure:
nla_nest_cancel(skb, mrp_tb);
return -EMSGSIZE;
}
int br_mrp_ring_port_open(struct net_device *dev, u8 loc)
{
struct net_bridge_port *p;
int err = 0;
p = br_port_get_rcu(dev);
if (!p) {
err = -EINVAL;
goto out;
}
if (loc)
p->flags |= BR_MRP_LOST_CONT;
else
p->flags &= ~BR_MRP_LOST_CONT;
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
out:
return err;
}
int br_mrp_in_port_open(struct net_device *dev, u8 loc)
{
struct net_bridge_port *p;
int err = 0;
p = br_port_get_rcu(dev);
if (!p) {
err = -EINVAL;
goto out;
}
if (loc)
p->flags |= BR_MRP_LOST_IN_CONT;
else
p->flags &= ~BR_MRP_LOST_IN_CONT;
br_ifinfo_notify(RTM_NEWLINK, NULL, p);
out:
return err;
}
| linux-master | net/bridge/br_mrp_netlink.c |
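For context, the nested layout that br_mrp_instance_parse() above unpacks arrives in an RTM_SETLINK message whose IFLA_AF_SPEC attribute carries IFLA_BRIDGE_MRP. The snippet below is a hypothetical userspace sketch built with libmnl, not part of the kernel sources; the bridge and port names ("br0", "eth0", "eth1") and the ring ID are assumptions, and error handling is mostly omitted.

#include <sys/socket.h>
#include <net/if.h>
#include <time.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
#include <libmnl/libmnl.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlattr *afspec, *mrp, *inst;
	struct mnl_socket *nl;
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_SETLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nlh->nlmsg_seq = time(NULL);

	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = PF_BRIDGE;
	ifm->ifi_index = if_nametoindex("br0");	/* assumed bridge name */

	/* IFLA_AF_SPEC -> IFLA_BRIDGE_MRP -> IFLA_BRIDGE_MRP_INSTANCE,
	 * mirroring what br_mrp_instance_parse() expects on the kernel side.
	 */
	afspec = mnl_attr_nest_start(nlh, IFLA_AF_SPEC);
	mrp = mnl_attr_nest_start(nlh, IFLA_BRIDGE_MRP);
	inst = mnl_attr_nest_start(nlh, IFLA_BRIDGE_MRP_INSTANCE);
	mnl_attr_put_u32(nlh, IFLA_BRIDGE_MRP_INSTANCE_RING_ID, 1);
	mnl_attr_put_u32(nlh, IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX,
			 if_nametoindex("eth0"));	/* assumed ring ports */
	mnl_attr_put_u32(nlh, IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX,
			 if_nametoindex("eth1"));
	mnl_attr_nest_end(nlh, inst);
	mnl_attr_nest_end(nlh, mrp);
	mnl_attr_nest_end(nlh, afspec);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl)
		return 1;
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	mnl_socket_close(nl);
	return 0;
}

Using mnl_attr_nest_start() for each level also sets NLA_F_NESTED, which the kernel's nla_parse_nested() calls above require.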
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Sysfs attributes of bridge
* Linux ethernet bridge
*
* Authors:
* Stephen Hemminger <[email protected]>
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/sched/signal.h>
#include "br_private.h"
/* IMPORTANT: new bridge options must be added with netlink support only
* please do not add new sysfs entries
*/
#define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd)))
/*
* Common code for storing bridge parameters.
*/
static ssize_t store_bridge_parm(struct device *d,
const char *buf, size_t len,
int (*set)(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack))
{
struct net_bridge *br = to_bridge(d);
struct netlink_ext_ack extack = {0};
unsigned long val;
int err;
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
err = kstrtoul(buf, 0, &val);
if (err != 0)
return err;
if (!rtnl_trylock())
return restart_syscall();
err = (*set)(br, val, &extack);
if (!err)
netdev_state_change(br->dev);
if (extack._msg) {
if (err)
br_err(br, "%s\n", extack._msg);
else
br_warn(br, "%s\n", extack._msg);
}
rtnl_unlock();
return err ? err : len;
}
static ssize_t forward_delay_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
}
static int set_forward_delay(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_set_forward_delay(br, val);
}
static ssize_t forward_delay_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_forward_delay);
}
static DEVICE_ATTR_RW(forward_delay);
static ssize_t hello_time_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(to_bridge(d)->hello_time));
}
static int set_hello_time(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_set_hello_time(br, val);
}
static ssize_t hello_time_store(struct device *d,
struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_hello_time);
}
static DEVICE_ATTR_RW(hello_time);
static ssize_t max_age_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(to_bridge(d)->max_age));
}
static int set_max_age(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_set_max_age(br, val);
}
static ssize_t max_age_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_max_age);
}
static DEVICE_ATTR_RW(max_age);
static ssize_t ageing_time_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time));
}
static int set_ageing_time(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_set_ageing_time(br, val);
}
static ssize_t ageing_time_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_ageing_time);
}
static DEVICE_ATTR_RW(ageing_time);
static ssize_t stp_state_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->stp_enabled);
}
static int set_stp_state(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_stp_set_enabled(br, val, extack);
}
static ssize_t stp_state_store(struct device *d,
struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_stp_state);
}
static DEVICE_ATTR_RW(stp_state);
static ssize_t group_fwd_mask_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%#x\n", br->group_fwd_mask);
}
static int set_group_fwd_mask(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
if (val & BR_GROUPFWD_RESTRICTED)
return -EINVAL;
br->group_fwd_mask = val;
return 0;
}
static ssize_t group_fwd_mask_store(struct device *d,
struct device_attribute *attr,
const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_group_fwd_mask);
}
static DEVICE_ATTR_RW(group_fwd_mask);
static ssize_t priority_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n",
(br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]);
}
static int set_priority(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br_stp_set_bridge_priority(br, (u16) val);
return 0;
}
static ssize_t priority_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_priority);
}
static DEVICE_ATTR_RW(priority);
static ssize_t root_id_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return br_show_bridge_id(buf, &to_bridge(d)->designated_root);
}
static DEVICE_ATTR_RO(root_id);
static ssize_t bridge_id_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return br_show_bridge_id(buf, &to_bridge(d)->bridge_id);
}
static DEVICE_ATTR_RO(bridge_id);
static ssize_t root_port_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->root_port);
}
static DEVICE_ATTR_RO(root_port);
static ssize_t root_path_cost_show(struct device *d,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->root_path_cost);
}
static DEVICE_ATTR_RO(root_path_cost);
static ssize_t topology_change_show(struct device *d,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->topology_change);
}
static DEVICE_ATTR_RO(topology_change);
static ssize_t topology_change_detected_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->topology_change_detected);
}
static DEVICE_ATTR_RO(topology_change_detected);
static ssize_t hello_timer_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer));
}
static DEVICE_ATTR_RO(hello_timer);
static ssize_t tcn_timer_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer));
}
static DEVICE_ATTR_RO(tcn_timer);
static ssize_t topology_change_timer_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer));
}
static DEVICE_ATTR_RO(topology_change_timer);
static ssize_t gc_timer_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->gc_work.timer));
}
static DEVICE_ATTR_RO(gc_timer);
static ssize_t group_addr_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%pM\n", br->group_addr);
}
static ssize_t group_addr_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct net_bridge *br = to_bridge(d);
u8 new_addr[6];
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (!mac_pton(buf, new_addr))
return -EINVAL;
if (!is_link_local_ether_addr(new_addr))
return -EINVAL;
if (new_addr[5] == 1 || /* 802.3x Pause address */
new_addr[5] == 2 || /* 802.3ad Slow protocols */
new_addr[5] == 3) /* 802.1X PAE address */
return -EINVAL;
if (!rtnl_trylock())
return restart_syscall();
spin_lock_bh(&br->lock);
ether_addr_copy(br->group_addr, new_addr);
spin_unlock_bh(&br->lock);
br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
br_recalculate_fwd_mask(br);
netdev_state_change(br->dev);
rtnl_unlock();
return len;
}
static DEVICE_ATTR_RW(group_addr);
static int set_flush(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
struct net_bridge_fdb_flush_desc desc = {
.flags_mask = BIT(BR_FDB_STATIC)
};
br_fdb_flush(br, &desc);
return 0;
}
static ssize_t flush_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_flush);
}
static DEVICE_ATTR_WO(flush);
static ssize_t no_linklocal_learn_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br_boolopt_get(br, BR_BOOLOPT_NO_LL_LEARN));
}
static int set_no_linklocal_learn(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_boolopt_toggle(br, BR_BOOLOPT_NO_LL_LEARN, !!val, extack);
}
static ssize_t no_linklocal_learn_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_no_linklocal_learn);
}
static DEVICE_ATTR_RW(no_linklocal_learn);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t multicast_router_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->multicast_ctx.multicast_router);
}
static int set_multicast_router(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_multicast_set_router(&br->multicast_ctx, val);
}
static ssize_t multicast_router_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_multicast_router);
}
static DEVICE_ATTR_RW(multicast_router);
static ssize_t multicast_snooping_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED));
}
static ssize_t multicast_snooping_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_toggle);
}
static DEVICE_ATTR_RW(multicast_snooping);
static ssize_t multicast_query_use_ifaddr_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n",
br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR));
}
static int set_query_use_ifaddr(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
return 0;
}
static ssize_t
multicast_query_use_ifaddr_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_query_use_ifaddr);
}
static DEVICE_ATTR_RW(multicast_query_use_ifaddr);
static ssize_t multicast_querier_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->multicast_ctx.multicast_querier);
}
static int set_multicast_querier(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_multicast_set_querier(&br->multicast_ctx, val);
}
static ssize_t multicast_querier_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_multicast_querier);
}
static DEVICE_ATTR_RW(multicast_querier);
static ssize_t hash_elasticity_show(struct device *d,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", RHT_ELASTICITY);
}
static int set_elasticity(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
/* 16 is RHT_ELASTICITY */
NL_SET_ERR_MSG_MOD(extack,
"the hash_elasticity option has been deprecated and is always 16");
return 0;
}
static ssize_t hash_elasticity_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_elasticity);
}
static DEVICE_ATTR_RW(hash_elasticity);
static ssize_t hash_max_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->hash_max);
}
static int set_hash_max(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br->hash_max = val;
return 0;
}
static ssize_t hash_max_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_hash_max);
}
static DEVICE_ATTR_RW(hash_max);
static ssize_t multicast_igmp_version_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->multicast_ctx.multicast_igmp_version);
}
static int set_multicast_igmp_version(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_multicast_set_igmp_version(&br->multicast_ctx, val);
}
static ssize_t multicast_igmp_version_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_multicast_igmp_version);
}
static DEVICE_ATTR_RW(multicast_igmp_version);
static ssize_t multicast_last_member_count_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->multicast_ctx.multicast_last_member_count);
}
static int set_last_member_count(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br->multicast_ctx.multicast_last_member_count = val;
return 0;
}
static ssize_t multicast_last_member_count_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_last_member_count);
}
static DEVICE_ATTR_RW(multicast_last_member_count);
static ssize_t multicast_startup_query_count_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->multicast_ctx.multicast_startup_query_count);
}
static int set_startup_query_count(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br->multicast_ctx.multicast_startup_query_count = val;
return 0;
}
static ssize_t multicast_startup_query_count_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_startup_query_count);
}
static DEVICE_ATTR_RW(multicast_startup_query_count);
static ssize_t multicast_last_member_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval));
}
static int set_last_member_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br->multicast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_last_member_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_last_member_interval);
}
static DEVICE_ATTR_RW(multicast_last_member_interval);
static ssize_t multicast_membership_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval));
}
static int set_membership_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br->multicast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_membership_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_membership_interval);
}
static DEVICE_ATTR_RW(multicast_membership_interval);
static ssize_t multicast_querier_interval_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval));
}
static int set_querier_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br->multicast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_querier_interval_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_querier_interval);
}
static DEVICE_ATTR_RW(multicast_querier_interval);
static ssize_t multicast_query_interval_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval));
}
static int set_query_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br_multicast_set_query_intvl(&br->multicast_ctx, val);
return 0;
}
static ssize_t multicast_query_interval_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_query_interval);
}
static DEVICE_ATTR_RW(multicast_query_interval);
static ssize_t multicast_query_response_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(
buf, "%lu\n",
jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval));
}
static int set_query_response_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br->multicast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_query_response_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_query_response_interval);
}
static DEVICE_ATTR_RW(multicast_query_response_interval);
static ssize_t multicast_startup_query_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(
buf, "%lu\n",
jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval));
}
static int set_startup_query_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
return 0;
}
static ssize_t multicast_startup_query_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_startup_query_interval);
}
static DEVICE_ATTR_RW(multicast_startup_query_interval);
static ssize_t multicast_stats_enabled_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n",
br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED));
}
static int set_stats_enabled(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!val);
return 0;
}
static ssize_t multicast_stats_enabled_store(struct device *d,
struct device_attribute *attr,
const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_stats_enabled);
}
static DEVICE_ATTR_RW(multicast_stats_enabled);
#if IS_ENABLED(CONFIG_IPV6)
static ssize_t multicast_mld_version_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->multicast_ctx.multicast_mld_version);
}
static int set_multicast_mld_version(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_multicast_set_mld_version(&br->multicast_ctx, val);
}
static ssize_t multicast_mld_version_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_multicast_mld_version);
}
static DEVICE_ATTR_RW(multicast_mld_version);
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static ssize_t nf_call_iptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_IPTABLES));
}
static int set_nf_call_iptables(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
return 0;
}
static ssize_t nf_call_iptables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_iptables);
}
static DEVICE_ATTR_RW(nf_call_iptables);
static ssize_t nf_call_ip6tables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_IP6TABLES));
}
static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
return 0;
}
static ssize_t nf_call_ip6tables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_ip6tables);
}
static DEVICE_ATTR_RW(nf_call_ip6tables);
static ssize_t nf_call_arptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_ARPTABLES));
}
static int set_nf_call_arptables(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
return 0;
}
static ssize_t nf_call_arptables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_arptables);
}
static DEVICE_ATTR_RW(nf_call_arptables);
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
static ssize_t vlan_filtering_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br_opt_get(br, BROPT_VLAN_ENABLED));
}
static ssize_t vlan_filtering_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
}
static DEVICE_ATTR_RW(vlan_filtering);
static ssize_t vlan_protocol_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%#06x\n", ntohs(br->vlan_proto));
}
static ssize_t vlan_protocol_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_vlan_set_proto);
}
static DEVICE_ATTR_RW(vlan_protocol);
static ssize_t default_pvid_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->default_pvid);
}
static ssize_t default_pvid_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_vlan_set_default_pvid);
}
static DEVICE_ATTR_RW(default_pvid);
static ssize_t vlan_stats_enabled_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br_opt_get(br, BROPT_VLAN_STATS_ENABLED));
}
static int set_vlan_stats_enabled(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_vlan_set_stats(br, val);
}
static ssize_t vlan_stats_enabled_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_vlan_stats_enabled);
}
static DEVICE_ATTR_RW(vlan_stats_enabled);
static ssize_t vlan_stats_per_port_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br_opt_get(br, BROPT_VLAN_STATS_PER_PORT));
}
static int set_vlan_stats_per_port(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
return br_vlan_set_stats_per_port(br, val);
}
static ssize_t vlan_stats_per_port_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_vlan_stats_per_port);
}
static DEVICE_ATTR_RW(vlan_stats_per_port);
#endif
static struct attribute *bridge_attrs[] = {
&dev_attr_forward_delay.attr,
&dev_attr_hello_time.attr,
&dev_attr_max_age.attr,
&dev_attr_ageing_time.attr,
&dev_attr_stp_state.attr,
&dev_attr_group_fwd_mask.attr,
&dev_attr_priority.attr,
&dev_attr_bridge_id.attr,
&dev_attr_root_id.attr,
&dev_attr_root_path_cost.attr,
&dev_attr_root_port.attr,
&dev_attr_topology_change.attr,
&dev_attr_topology_change_detected.attr,
&dev_attr_hello_timer.attr,
&dev_attr_tcn_timer.attr,
&dev_attr_topology_change_timer.attr,
&dev_attr_gc_timer.attr,
&dev_attr_group_addr.attr,
&dev_attr_flush.attr,
&dev_attr_no_linklocal_learn.attr,
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
&dev_attr_multicast_router.attr,
&dev_attr_multicast_snooping.attr,
&dev_attr_multicast_querier.attr,
&dev_attr_multicast_query_use_ifaddr.attr,
&dev_attr_hash_elasticity.attr,
&dev_attr_hash_max.attr,
&dev_attr_multicast_last_member_count.attr,
&dev_attr_multicast_startup_query_count.attr,
&dev_attr_multicast_last_member_interval.attr,
&dev_attr_multicast_membership_interval.attr,
&dev_attr_multicast_querier_interval.attr,
&dev_attr_multicast_query_interval.attr,
&dev_attr_multicast_query_response_interval.attr,
&dev_attr_multicast_startup_query_interval.attr,
&dev_attr_multicast_stats_enabled.attr,
&dev_attr_multicast_igmp_version.attr,
#if IS_ENABLED(CONFIG_IPV6)
&dev_attr_multicast_mld_version.attr,
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
&dev_attr_nf_call_iptables.attr,
&dev_attr_nf_call_ip6tables.attr,
&dev_attr_nf_call_arptables.attr,
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
&dev_attr_vlan_filtering.attr,
&dev_attr_vlan_protocol.attr,
&dev_attr_default_pvid.attr,
&dev_attr_vlan_stats_enabled.attr,
&dev_attr_vlan_stats_per_port.attr,
#endif
NULL
};
static const struct attribute_group bridge_group = {
.name = SYSFS_BRIDGE_ATTR,
.attrs = bridge_attrs,
};
/*
* Export the forwarding information table as a binary file
* The records are struct __fdb_entry.
*
* Returns the number of bytes read.
*/
static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct net_bridge *br = to_bridge(dev);
int n;
/* must read whole records */
if (off % sizeof(struct __fdb_entry) != 0)
return -EINVAL;
n = br_fdb_fillbuf(br, buf,
count / sizeof(struct __fdb_entry),
off / sizeof(struct __fdb_entry));
if (n > 0)
n *= sizeof(struct __fdb_entry);
return n;
}
static struct bin_attribute bridge_forward = {
.attr = { .name = SYSFS_BRIDGE_FDB,
.mode = 0444, },
.read = brforward_read,
};
/*
* Add entries in sysfs onto the existing network class device
* for the bridge.
Adds an attribute group "bridge" containing tuning parameters.
* Binary attribute containing the forward table
* Sub directory to hold links to interfaces.
*
* Note: the ifobj exists only to be a subdirectory
to hold links. The ifobj exists in the same data structure
as its parent, the bridge, so reference counting works.
*/
int br_sysfs_addbr(struct net_device *dev)
{
struct kobject *brobj = &dev->dev.kobj;
struct net_bridge *br = netdev_priv(dev);
int err;
err = sysfs_create_group(brobj, &bridge_group);
if (err) {
pr_info("%s: can't create group %s/%s\n",
__func__, dev->name, bridge_group.name);
goto out1;
}
err = sysfs_create_bin_file(brobj, &bridge_forward);
if (err) {
pr_info("%s: can't create attribute file %s/%s\n",
__func__, dev->name, bridge_forward.attr.name);
goto out2;
}
br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj);
if (!br->ifobj) {
pr_info("%s: can't add kobject (directory) %s/%s\n",
__func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR);
err = -ENOMEM;
goto out3;
}
return 0;
out3:
sysfs_remove_bin_file(&dev->dev.kobj, &bridge_forward);
out2:
sysfs_remove_group(&dev->dev.kobj, &bridge_group);
out1:
return err;
}
void br_sysfs_delbr(struct net_device *dev)
{
struct kobject *kobj = &dev->dev.kobj;
struct net_bridge *br = netdev_priv(dev);
kobject_put(br->ifobj);
sysfs_remove_bin_file(kobj, &bridge_forward);
sysfs_remove_group(kobj, &bridge_group);
}
| linux-master | net/bridge/br_sysfs_br.c |
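A hypothetical userspace sketch (not part of the kernel tree) that consumes the binary forwarding table exported by brforward_read() above. It assumes a bridge named "br0" and reads whole struct __fdb_entry records from /sys/class/net/br0/brforward, since the kernel side rejects partial records.

#include <stdio.h>
#include <linux/if_bridge.h>

int main(void)
{
	struct __fdb_entry fdb[64];
	size_t n, i;
	FILE *f;

	f = fopen("/sys/class/net/br0/brforward", "rb");
	if (!f)
		return 1;

	/* brforward_read() only serves whole records, so read in
	 * multiples of sizeof(struct __fdb_entry).
	 */
	n = fread(fdb, sizeof(fdb[0]), 64, f);
	for (i = 0; i < n; i++)
		printf("%02x:%02x:%02x:%02x:%02x:%02x port %u%s\n",
		       fdb[i].mac_addr[0], fdb[i].mac_addr[1],
		       fdb[i].mac_addr[2], fdb[i].mac_addr[3],
		       fdb[i].mac_addr[4], fdb[i].mac_addr[5],
		       fdb[i].port_no, fdb[i].is_local ? " (local)" : "");

	fclose(f);
	return 0;
}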
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Forwarding decision
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
/* Don't forward packets to the originating port or when forwarding is disabled */
static inline int should_deliver(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
struct net_bridge_vlan_group *vg;
vg = nbp_vlan_group_rcu(p);
return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
nbp_switchdev_allowed_egress(p, skb) &&
!br_skb_isolated(p, skb);
}
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
goto drop;
br_drop_fake_rtable(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
eth_type_vlan(skb->protocol)) {
int depth;
if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
goto drop;
skb_set_network_header(skb, depth);
}
br_switchdev_frame_set_offload_fwd_mark(skb);
dev_queue_xmit(skb);
return 0;
drop:
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
skb_clear_tstamp(skb);
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
net, sk, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);
static void __br_forward(const struct net_bridge_port *to,
struct sk_buff *skb, bool local_orig)
{
struct net_bridge_vlan_group *vg;
struct net_device *indev;
struct net *net;
int br_hook;
/* Mark the skb for forwarding offload early so that br_handle_vlan()
* can know whether to pop the VLAN header on egress or keep it.
*/
nbp_switchdev_frame_mark_tx_fwd_offload(to, skb);
vg = nbp_vlan_group_rcu(to);
skb = br_handle_vlan(to->br, to, vg, skb);
if (!skb)
return;
indev = skb->dev;
skb->dev = to->dev;
if (!local_orig) {
if (skb_warn_if_lro(skb)) {
kfree_skb(skb);
return;
}
br_hook = NF_BR_FORWARD;
skb_forward_csum(skb);
net = dev_net(indev);
} else {
if (unlikely(netpoll_tx_running(to->br->dev))) {
skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
kfree_skb(skb);
else
br_netpoll_send_skb(to, skb);
return;
}
br_hook = NF_BR_LOCAL_OUT;
net = dev_net(skb->dev);
indev = NULL;
}
NF_HOOK(NFPROTO_BRIDGE, br_hook,
net, NULL, skb, indev, skb->dev,
br_forward_finish);
}
static int deliver_clone(const struct net_bridge_port *prev,
struct sk_buff *skb, bool local_orig)
{
struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb) {
DEV_STATS_INC(dev, tx_dropped);
return -ENOMEM;
}
__br_forward(prev, skb, local_orig);
return 0;
}
/**
* br_forward - forward a packet to a specific port
* @to: destination port
* @skb: packet being forwarded
* @local_rcv: packet will be received locally after forwarding
* @local_orig: packet is locally originated
*
* Should be called with rcu_read_lock.
*/
void br_forward(const struct net_bridge_port *to,
struct sk_buff *skb, bool local_rcv, bool local_orig)
{
if (unlikely(!to))
goto out;
/* redirect to backup link if the destination port is down */
if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
struct net_bridge_port *backup_port;
backup_port = rcu_dereference(to->backup_port);
if (unlikely(!backup_port))
goto out;
BR_INPUT_SKB_CB(skb)->backup_nhid = READ_ONCE(to->backup_nhid);
to = backup_port;
}
if (should_deliver(to, skb)) {
if (local_rcv)
deliver_clone(to, skb, local_orig);
else
__br_forward(to, skb, local_orig);
return;
}
out:
if (!local_rcv)
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);
static struct net_bridge_port *maybe_deliver(
struct net_bridge_port *prev, struct net_bridge_port *p,
struct sk_buff *skb, bool local_orig)
{
u8 igmp_type = br_multicast_igmp_type(skb);
int err;
if (!should_deliver(p, skb))
return prev;
nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);
if (!prev)
goto out;
err = deliver_clone(prev, skb, local_orig);
if (err)
return ERR_PTR(err);
out:
br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);
return p;
}
/* called under rcu_read_lock */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
enum br_pkt_type pkt_type, bool local_rcv, bool local_orig,
u16 vid)
{
struct net_bridge_port *prev = NULL;
struct net_bridge_port *p;
br_tc_skb_miss_set(skb, pkt_type != BR_PKT_BROADCAST);
list_for_each_entry_rcu(p, &br->port_list, list) {
/* Do not flood unicast traffic to ports that turn it off, nor
* other traffic if flood off, except for traffic we originate
*/
switch (pkt_type) {
case BR_PKT_UNICAST:
if (!(p->flags & BR_FLOOD))
continue;
break;
case BR_PKT_MULTICAST:
if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
continue;
break;
case BR_PKT_BROADCAST:
if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
continue;
break;
}
/* Do not flood to ports that enable proxy ARP */
if (p->flags & BR_PROXYARP)
continue;
if (BR_INPUT_SKB_CB(skb)->proxyarp_replied &&
((p->flags & BR_PROXYARP_WIFI) ||
br_is_neigh_suppress_enabled(p, vid)))
continue;
prev = maybe_deliver(prev, p, skb, local_orig);
if (IS_ERR(prev))
goto out;
}
if (!prev)
goto out;
if (local_rcv)
deliver_clone(prev, skb, local_orig);
else
__br_forward(prev, skb, local_orig);
return;
out:
if (!local_rcv)
kfree_skb(skb);
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
const unsigned char *addr, bool local_orig)
{
struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
const unsigned char *src = eth_hdr(skb)->h_source;
if (!should_deliver(p, skb))
return;
/* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
if (skb->dev == p->dev && ether_addr_equal(src, addr))
return;
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb) {
DEV_STATS_INC(dev, tx_dropped);
return;
}
if (!is_broadcast_ether_addr(addr))
memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
__br_forward(p, skb, local_orig);
}
/* called with rcu_read_lock */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb,
struct net_bridge_mcast *brmctx,
bool local_rcv, bool local_orig)
{
struct net_bridge_port *prev = NULL;
struct net_bridge_port_group *p;
bool allow_mode_include = true;
struct hlist_node *rp;
rp = br_multicast_get_first_rport_node(brmctx, skb);
if (mdst) {
p = rcu_dereference(mdst->ports);
if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) &&
br_multicast_is_star_g(&mdst->addr))
allow_mode_include = false;
} else {
p = NULL;
br_tc_skb_miss_set(skb, true);
}
while (p || rp) {
struct net_bridge_port *port, *lport, *rport;
lport = p ? p->key.port : NULL;
rport = br_multicast_rport_from_node_skb(rp, skb);
if ((unsigned long)lport > (unsigned long)rport) {
port = lport;
if (port->flags & BR_MULTICAST_TO_UNICAST) {
maybe_deliver_addr(lport, skb, p->eth_addr,
local_orig);
goto delivered;
}
if ((!allow_mode_include &&
p->filter_mode == MCAST_INCLUDE) ||
(p->flags & MDB_PG_FLAGS_BLOCKED))
goto delivered;
} else {
port = rport;
}
prev = maybe_deliver(prev, port, skb, local_orig);
if (IS_ERR(prev))
goto out;
delivered:
if ((unsigned long)lport >= (unsigned long)port)
p = rcu_dereference(p->next);
if ((unsigned long)rport >= (unsigned long)port)
rp = rcu_dereference(hlist_next_rcu(rp));
}
if (!prev)
goto out;
if (local_rcv)
deliver_clone(prev, skb, local_orig);
else
__br_forward(prev, skb, local_orig);
return;
out:
if (!local_rcv)
kfree_skb(skb);
}
#endif
| linux-master | net/bridge/br_forward.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/genetlink.h>
#include "br_private.h"
#include "br_private_cfm.h"
static const struct nla_policy
br_cfm_mep_create_policy[IFLA_BRIDGE_CFM_MEP_CREATE_MAX + 1] = {
[IFLA_BRIDGE_CFM_MEP_CREATE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX] = { .type = NLA_U32 },
};
static const struct nla_policy
br_cfm_mep_delete_policy[IFLA_BRIDGE_CFM_MEP_DELETE_MAX + 1] = {
[IFLA_BRIDGE_CFM_MEP_DELETE_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_CFM_MEP_DELETE_INSTANCE] = { .type = NLA_U32 },
};
static const struct nla_policy
br_cfm_mep_config_policy[IFLA_BRIDGE_CFM_MEP_CONFIG_MAX + 1] = {
[IFLA_BRIDGE_CFM_MEP_CONFIG_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC] = NLA_POLICY_ETH_ADDR,
[IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL] = NLA_POLICY_MAX(NLA_U32, 7),
[IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID] = NLA_POLICY_MAX(NLA_U32, 0x1FFF),
};
static const struct nla_policy
br_cfm_cc_config_policy[IFLA_BRIDGE_CFM_CC_CONFIG_MAX + 1] = {
[IFLA_BRIDGE_CFM_CC_CONFIG_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID] = {
.type = NLA_BINARY, .len = CFM_MAID_LENGTH },
};
static const struct nla_policy
br_cfm_cc_peer_mep_policy[IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX + 1] = {
[IFLA_BRIDGE_CFM_CC_PEER_MEP_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_PEER_MEPID] = NLA_POLICY_MAX(NLA_U32, 0x1FFF),
};
static const struct nla_policy
br_cfm_cc_rdi_policy[IFLA_BRIDGE_CFM_CC_RDI_MAX + 1] = {
[IFLA_BRIDGE_CFM_CC_RDI_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_CFM_CC_RDI_INSTANCE] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_RDI_RDI] = { .type = NLA_U32 },
};
static const struct nla_policy
br_cfm_cc_ccm_tx_policy[IFLA_BRIDGE_CFM_CC_CCM_TX_MAX + 1] = {
[IFLA_BRIDGE_CFM_CC_CCM_TX_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC] = NLA_POLICY_ETH_ADDR,
[IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE] = { .type = NLA_U8 },
[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV] = { .type = NLA_U32 },
[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE] = { .type = NLA_U8 },
};
static const struct nla_policy
br_cfm_policy[IFLA_BRIDGE_CFM_MAX + 1] = {
[IFLA_BRIDGE_CFM_UNSPEC] = { .type = NLA_REJECT },
[IFLA_BRIDGE_CFM_MEP_CREATE] =
NLA_POLICY_NESTED(br_cfm_mep_create_policy),
[IFLA_BRIDGE_CFM_MEP_DELETE] =
NLA_POLICY_NESTED(br_cfm_mep_delete_policy),
[IFLA_BRIDGE_CFM_MEP_CONFIG] =
NLA_POLICY_NESTED(br_cfm_mep_config_policy),
[IFLA_BRIDGE_CFM_CC_CONFIG] =
NLA_POLICY_NESTED(br_cfm_cc_config_policy),
[IFLA_BRIDGE_CFM_CC_PEER_MEP_ADD] =
NLA_POLICY_NESTED(br_cfm_cc_peer_mep_policy),
[IFLA_BRIDGE_CFM_CC_PEER_MEP_REMOVE] =
NLA_POLICY_NESTED(br_cfm_cc_peer_mep_policy),
[IFLA_BRIDGE_CFM_CC_RDI] =
NLA_POLICY_NESTED(br_cfm_cc_rdi_policy),
[IFLA_BRIDGE_CFM_CC_CCM_TX] =
NLA_POLICY_NESTED(br_cfm_cc_ccm_tx_policy),
};
static int br_mep_create_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_MEP_CREATE_MAX + 1];
struct br_cfm_mep_create create;
u32 instance;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MEP_CREATE_MAX, attr,
br_cfm_mep_create_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN]) {
NL_SET_ERR_MSG_MOD(extack, "Missing DOMAIN attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION]) {
NL_SET_ERR_MSG_MOD(extack, "Missing DIRECTION attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX]) {
NL_SET_ERR_MSG_MOD(extack, "Missing IFINDEX attribute");
return -EINVAL;
}
memset(&create, 0, sizeof(create));
instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE]);
create.domain = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN]);
create.direction = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION]);
create.ifindex = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX]);
return br_cfm_mep_create(br, instance, &create, extack);
}
static int br_mep_delete_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_MEP_DELETE_MAX + 1];
u32 instance;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MEP_DELETE_MAX, attr,
br_cfm_mep_delete_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_CFM_MEP_DELETE_INSTANCE]) {
NL_SET_ERR_MSG_MOD(extack,
"Missing INSTANCE attribute");
return -EINVAL;
}
instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_DELETE_INSTANCE]);
return br_cfm_mep_delete(br, instance, extack);
}
static int br_mep_config_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MAX + 1];
struct br_cfm_mep_config config;
u32 instance;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MEP_CONFIG_MAX, attr,
br_cfm_mep_config_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC]) {
NL_SET_ERR_MSG_MOD(extack, "Missing UNICAST_MAC attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL]) {
NL_SET_ERR_MSG_MOD(extack, "Missing MDLEVEL attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID]) {
NL_SET_ERR_MSG_MOD(extack, "Missing MEPID attribute");
return -EINVAL;
}
memset(&config, 0, sizeof(config));
instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE]);
nla_memcpy(&config.unicast_mac.addr,
tb[IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC],
sizeof(config.unicast_mac.addr));
config.mdlevel = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL]);
config.mepid = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID]);
return br_cfm_mep_config_set(br, instance, &config, extack);
}
static int br_cc_config_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_CC_CONFIG_MAX + 1];
struct br_cfm_cc_config config;
u32 instance;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_CONFIG_MAX, attr,
br_cfm_cc_config_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing ENABLE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL]) {
NL_SET_ERR_MSG_MOD(extack, "Missing INTERVAL attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID]) {
NL_SET_ERR_MSG_MOD(extack, "Missing MAID attribute");
return -EINVAL;
}
memset(&config, 0, sizeof(config));
instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE]);
config.enable = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE]);
config.exp_interval = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL]);
nla_memcpy(&config.exp_maid.data, tb[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID],
sizeof(config.exp_maid.data));
return br_cfm_cc_config_set(br, instance, &config, extack);
}
static int br_cc_peer_mep_add_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX + 1];
u32 instance, peer_mep_id;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX, attr,
br_cfm_cc_peer_mep_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_PEER_MEPID]) {
NL_SET_ERR_MSG_MOD(extack, "Missing PEER_MEP_ID attribute");
return -EINVAL;
}
instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE]);
peer_mep_id = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_PEER_MEPID]);
return br_cfm_cc_peer_mep_add(br, instance, peer_mep_id, extack);
}
static int br_cc_peer_mep_remove_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX + 1];
u32 instance, peer_mep_id;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX, attr,
br_cfm_cc_peer_mep_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_PEER_MEPID]) {
NL_SET_ERR_MSG_MOD(extack, "Missing PEER_MEP_ID attribute");
return -EINVAL;
}
instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE]);
peer_mep_id = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_PEER_MEPID]);
return br_cfm_cc_peer_mep_remove(br, instance, peer_mep_id, extack);
}
static int br_cc_rdi_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_CC_RDI_MAX + 1];
u32 instance, rdi;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_RDI_MAX, attr,
br_cfm_cc_rdi_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_CFM_CC_RDI_INSTANCE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_RDI_RDI]) {
NL_SET_ERR_MSG_MOD(extack, "Missing RDI attribute");
return -EINVAL;
}
instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_RDI_INSTANCE]);
rdi = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_RDI_RDI]);
return br_cfm_cc_rdi_set(br, instance, rdi, extack);
}
static int br_cc_ccm_tx_parse(struct net_bridge *br, struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_CC_CCM_TX_MAX + 1];
struct br_cfm_cc_ccm_tx_info tx_info;
u32 instance;
int err;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_CCM_TX_MAX, attr,
br_cfm_cc_ccm_tx_policy, extack);
if (err)
return err;
if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC]) {
NL_SET_ERR_MSG_MOD(extack, "Missing DMAC attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing SEQ_NO_UPDATE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD]) {
NL_SET_ERR_MSG_MOD(extack, "Missing PERIOD attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV]) {
NL_SET_ERR_MSG_MOD(extack, "Missing IF_TLV attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing IF_TLV_VALUE attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV]) {
NL_SET_ERR_MSG_MOD(extack, "Missing PORT_TLV attribute");
return -EINVAL;
}
if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing PORT_TLV_VALUE attribute");
return -EINVAL;
}
memset(&tx_info, 0, sizeof(tx_info));
	instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE]);
nla_memcpy(&tx_info.dmac.addr,
tb[IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC],
sizeof(tx_info.dmac.addr));
tx_info.seq_no_update = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE]);
tx_info.period = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD]);
tx_info.if_tlv = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV]);
tx_info.if_tlv_value = nla_get_u8(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE]);
tx_info.port_tlv = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV]);
tx_info.port_tlv_value = nla_get_u8(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE]);
return br_cfm_cc_ccm_tx(br, instance, &tx_info, extack);
}
int br_cfm_parse(struct net_bridge *br, struct net_bridge_port *p,
struct nlattr *attr, int cmd, struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_BRIDGE_CFM_MAX + 1];
int err;
	/* When this function is called for a port, the br pointer is
	 * invalid, therefore set br to the port's bridge.
	 */
if (p)
br = p->br;
err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MAX, attr,
br_cfm_policy, extack);
if (err)
return err;
if (tb[IFLA_BRIDGE_CFM_MEP_CREATE]) {
err = br_mep_create_parse(br, tb[IFLA_BRIDGE_CFM_MEP_CREATE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_CFM_MEP_DELETE]) {
err = br_mep_delete_parse(br, tb[IFLA_BRIDGE_CFM_MEP_DELETE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_CFM_MEP_CONFIG]) {
err = br_mep_config_parse(br, tb[IFLA_BRIDGE_CFM_MEP_CONFIG],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_CFM_CC_CONFIG]) {
err = br_cc_config_parse(br, tb[IFLA_BRIDGE_CFM_CC_CONFIG],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_ADD]) {
err = br_cc_peer_mep_add_parse(br, tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_ADD],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_REMOVE]) {
err = br_cc_peer_mep_remove_parse(br, tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_REMOVE],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_CFM_CC_RDI]) {
err = br_cc_rdi_parse(br, tb[IFLA_BRIDGE_CFM_CC_RDI],
extack);
if (err)
return err;
}
if (tb[IFLA_BRIDGE_CFM_CC_CCM_TX]) {
err = br_cc_ccm_tx_parse(br, tb[IFLA_BRIDGE_CFM_CC_CCM_TX],
extack);
if (err)
return err;
}
return 0;
}
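/* Illustrative attribute layout (reading aid only, not a normative
 * description of the userspace encoding): the nest handed to
 * br_cfm_parse() is expected to look roughly like
 *
 *   IFLA_BRIDGE_CFM
 *     IFLA_BRIDGE_CFM_MEP_CREATE
 *       IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE  (u32)
 *       IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN    (u32)
 *       IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION (u32)
 *       IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX   (u32)
 *     IFLA_BRIDGE_CFM_CC_CONFIG
 *       ...
 *
 * with each command nest parsed independently by the helpers above, so a
 * single message may carry several commands.
 */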
int br_cfm_config_fill_info(struct sk_buff *skb, struct net_bridge *br)
{
struct br_cfm_peer_mep *peer_mep;
struct br_cfm_mep *mep;
struct nlattr *tb;
hlist_for_each_entry_rcu(mep, &br->mep_list, head) {
tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_MEP_CREATE_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE,
mep->instance))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN,
mep->create.domain))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION,
mep->create.direction))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX,
mep->create.ifindex))
goto nla_put_failure;
nla_nest_end(skb, tb);
tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE,
mep->instance))
goto nla_put_failure;
if (nla_put(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC,
sizeof(mep->config.unicast_mac.addr),
mep->config.unicast_mac.addr))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL,
mep->config.mdlevel))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID,
mep->config.mepid))
goto nla_put_failure;
nla_nest_end(skb, tb);
tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_CC_CONFIG_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE,
mep->instance))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE,
mep->cc_config.enable))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL,
mep->cc_config.exp_interval))
goto nla_put_failure;
if (nla_put(skb, IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID,
sizeof(mep->cc_config.exp_maid.data),
mep->cc_config.exp_maid.data))
goto nla_put_failure;
nla_nest_end(skb, tb);
tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_CC_RDI_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_RDI_INSTANCE,
mep->instance))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_RDI_RDI,
mep->rdi))
goto nla_put_failure;
nla_nest_end(skb, tb);
tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE,
mep->instance))
goto nla_put_failure;
if (nla_put(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC,
			    sizeof(mep->cc_ccm_tx_info.dmac.addr),
mep->cc_ccm_tx_info.dmac.addr))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE,
mep->cc_ccm_tx_info.seq_no_update))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD,
mep->cc_ccm_tx_info.period))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV,
mep->cc_ccm_tx_info.if_tlv))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE,
mep->cc_ccm_tx_info.if_tlv_value))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV,
mep->cc_ccm_tx_info.port_tlv))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE,
mep->cc_ccm_tx_info.port_tlv_value))
goto nla_put_failure;
nla_nest_end(skb, tb);
hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head) {
tb = nla_nest_start(skb,
IFLA_BRIDGE_CFM_CC_PEER_MEP_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE,
mep->instance))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_PEER_MEPID,
peer_mep->mepid))
goto nla_put_failure;
nla_nest_end(skb, tb);
}
}
return 0;
nla_put_failure:
nla_nest_cancel(skb, tb);
nla_info_failure:
return -EMSGSIZE;
}
int br_cfm_status_fill_info(struct sk_buff *skb,
struct net_bridge *br,
bool getlink)
{
struct br_cfm_peer_mep *peer_mep;
struct br_cfm_mep *mep;
struct nlattr *tb;
hlist_for_each_entry_rcu(mep, &br->mep_list, head) {
tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_MEP_STATUS_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE,
mep->instance))
goto nla_put_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN,
mep->status.opcode_unexp_seen))
goto nla_put_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN,
mep->status.version_unexp_seen))
goto nla_put_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN,
mep->status.rx_level_low_seen))
goto nla_put_failure;
/* Only clear if this is a GETLINK */
if (getlink) {
/* Clear all 'seen' indications */
mep->status.opcode_unexp_seen = false;
mep->status.version_unexp_seen = false;
mep->status.rx_level_low_seen = false;
}
nla_nest_end(skb, tb);
hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head) {
tb = nla_nest_start(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_INFO);
if (!tb)
goto nla_info_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE,
mep->instance))
goto nla_put_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID,
peer_mep->mepid))
goto nla_put_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT,
peer_mep->cc_status.ccm_defect))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI,
peer_mep->cc_status.rdi))
goto nla_put_failure;
if (nla_put_u8(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE,
peer_mep->cc_status.port_tlv_value))
goto nla_put_failure;
if (nla_put_u8(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE,
peer_mep->cc_status.if_tlv_value))
goto nla_put_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN,
peer_mep->cc_status.seen))
goto nla_put_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN,
peer_mep->cc_status.tlv_seen))
goto nla_put_failure;
if (nla_put_u32(skb,
IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN,
peer_mep->cc_status.seq_unexp_seen))
goto nla_put_failure;
if (getlink) { /* Only clear if this is a GETLINK */
/* Clear all 'seen' indications */
peer_mep->cc_status.seen = false;
peer_mep->cc_status.tlv_seen = false;
peer_mep->cc_status.seq_unexp_seen = false;
}
nla_nest_end(skb, tb);
}
}
return 0;
nla_put_failure:
nla_nest_cancel(skb, tb);
nla_info_failure:
return -EMSGSIZE;
}
| linux-master | net/bridge/br_cfm_netlink.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handle firewalling
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <[email protected]>
* Bart De Schuymer <[email protected]>
*
* Lennert dedicates this file to Kerstin Wurdinger.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <uapi/linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/rculist.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static unsigned int brnf_net_id __read_mostly;
struct brnf_net {
bool enabled;
#ifdef CONFIG_SYSCTL
struct ctl_table_header *ctl_hdr;
#endif
/* default value is 1 */
int call_iptables;
int call_ip6tables;
int call_arptables;
/* default value is 0 */
int filter_vlan_tagged;
int filter_pppoe_tagged;
int pass_vlan_indev;
};
#define IS_IP(skb) \
(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
#define IS_IPV6(skb) \
(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
#define IS_ARP(skb) \
(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
if (skb_vlan_tag_present(skb))
return skb->protocol;
else if (skb->protocol == htons(ETH_P_8021Q))
return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
else
return 0;
}
static inline bool is_vlan_ip(const struct sk_buff *skb, const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
return vlan_proto(skb) == htons(ETH_P_IP) && brnet->filter_vlan_tagged;
}
static inline bool is_vlan_ipv6(const struct sk_buff *skb,
const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
return vlan_proto(skb) == htons(ETH_P_IPV6) &&
brnet->filter_vlan_tagged;
}
static inline bool is_vlan_arp(const struct sk_buff *skb, const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
return vlan_proto(skb) == htons(ETH_P_ARP) && brnet->filter_vlan_tagged;
}
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
sizeof(struct pppoe_hdr)));
}
static inline bool is_pppoe_ip(const struct sk_buff *skb, const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
return skb->protocol == htons(ETH_P_PPP_SES) &&
pppoe_proto(skb) == htons(PPP_IP) && brnet->filter_pppoe_tagged;
}
static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
return skb->protocol == htons(ETH_P_PPP_SES) &&
pppoe_proto(skb) == htons(PPP_IPV6) &&
brnet->filter_pppoe_tagged;
}
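/* Reading aid for the classification helpers above: for a frame whose
 * 802.1Q tag is still in the payload (no hw acceleration), skb->protocol
 * is ETH_P_8021Q, so IS_IP()/IS_IPV6()/IS_ARP() are all false; vlan_proto()
 * then returns h_vlan_encapsulated_proto and is_vlan_ip() only matches
 * when that inner protocol is ETH_P_IP *and* the per-netns
 * filter_vlan_tagged knob (bridge-nf-filter-vlan-tagged) is enabled.
 */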
/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
struct brnf_frag_data {
char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
u8 encap_size;
u8 size;
u16 vlan_tci;
__be16 vlan_proto;
};
static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
static void nf_bridge_info_free(struct sk_buff *skb)
{
skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
struct net_bridge_port *port;
port = br_port_get_rcu(dev);
return port ? port->br->dev : NULL;
}
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
return skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
}
unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
switch (skb->protocol) {
case __cpu_to_be16(ETH_P_8021Q):
return VLAN_HLEN;
case __cpu_to_be16(ETH_P_PPP_SES):
return PPPOE_SES_HLEN;
default:
return 0;
}
}
static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
skb_pull(skb, len);
skb->network_header += len;
}
static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
skb_pull_rcsum(skb, len);
skb->network_header += len;
}
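/* Example of the adjustment done by the two helpers above for a PPPoE
 * session frame (illustrative): before the pull, skb->data points at the
 * PPPoE/PPP encapsulation and the IP header sits PPPOE_SES_HLEN bytes
 * further in; after the pull, skb->data and the recorded network header
 * offset both advance by PPPOE_SES_HLEN so they land on the IP header,
 * with the _rcsum variant also keeping skb->csum consistent for
 * CHECKSUM_COMPLETE skbs.
 */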
/* When handing a packet over to the IP layer
* check whether we have a skb that is in the
* expected format
*/
static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
{
const struct iphdr *iph;
u32 len;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto inhdr_error;
iph = ip_hdr(skb);
/* Basic sanity checks */
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto csum_error;
len = skb_ip_totlen(skb);
if (skb->len < len) {
__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
} else if (len < (iph->ihl*4))
goto inhdr_error;
if (pskb_trim_rcsum(skb, len)) {
__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
goto drop;
}
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
/* We should really parse IP options here but until
* somebody who actually uses IP options complains to
* us we'll just silently ignore the options because
* we're lazy!
*/
return 0;
csum_error:
__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
return -1;
}
void nf_bridge_update_protocol(struct sk_buff *skb)
{
const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
switch (nf_bridge->orig_proto) {
case BRNF_PROTO_8021Q:
skb->protocol = htons(ETH_P_8021Q);
break;
case BRNF_PROTO_PPPOE:
skb->protocol = htons(ETH_P_PPP_SES);
break;
case BRNF_PROTO_UNCHANGED:
break;
}
}
/* Obtain the correct destination MAC address, while preserving the original
* source MAC address. If we already know this address, we just copy it. If we
* don't, we use the neighbour framework to find out. In both cases, we make
* sure that br_handle_frame_finish() is called afterwards.
*/
int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct neighbour *neigh;
struct dst_entry *dst;
skb->dev = bridge_parent(skb->dev);
if (!skb->dev)
goto free_skb;
dst = skb_dst(skb);
neigh = dst_neigh_lookup_skb(dst, skb);
if (neigh) {
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
int ret;
if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
READ_ONCE(neigh->hh.hh_len)) {
neigh_hh_bridge(&neigh->hh, skb);
skb->dev = nf_bridge->physindev;
ret = br_handle_frame_finish(net, sk, skb);
} else {
/* the neighbour function below overwrites the complete
* MAC header, so we save the Ethernet source address and
* protocol number.
*/
skb_copy_from_linear_data_offset(skb,
-(ETH_HLEN-ETH_ALEN),
nf_bridge->neigh_header,
ETH_HLEN-ETH_ALEN);
/* tell br_dev_xmit to continue with forwarding */
nf_bridge->bridged_dnat = 1;
/* FIXME Need to refragment */
ret = neigh->output(neigh, skb);
}
neigh_release(neigh);
return ret;
}
free_skb:
kfree_skb(skb);
return 0;
}
static inline bool
br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
const struct nf_bridge_info *nf_bridge)
{
return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
}
/* This requires some explaining. If DNAT has taken place,
* we will need to fix up the destination Ethernet address.
* This is also true when SNAT takes place (for the reply direction).
*
* There are two cases to consider:
* 1. The packet was DNAT'ed to a device in the same bridge
* port group as it was received on. We can still bridge
* the packet.
* 2. The packet was DNAT'ed to a different device, either
* a non-bridged device or another bridge port group.
* The packet will need to be routed.
*
* The correct way of distinguishing between these two cases is to
* call ip_route_input() and to look at skb->dst->dev, which is
* changed to the destination device if ip_route_input() succeeds.
*
* Let's first consider the case that ip_route_input() succeeds:
*
* If the output device equals the logical bridge device the packet
* came in on, we can consider this bridging. The corresponding MAC
* address will be obtained in br_nf_pre_routing_finish_bridge.
* Otherwise, the packet is considered to be routed and we just
* change the destination MAC address so that the packet will
* later be passed up to the IP stack to be routed. For a redirected
* packet, ip_route_input() will give back the localhost as output device,
* which differs from the bridge device.
*
* Let's now consider the case that ip_route_input() fails:
*
* This can be because the destination address is martian, in which case
* the packet will be dropped.
* If IP forwarding is disabled, ip_route_input() will fail, while
* ip_route_output_key() can return success. The source
* address for ip_route_output_key() is set to zero, so ip_route_output_key()
* thinks we're handling a locally generated packet and won't care
* if IP forwarding is enabled. If the output device equals the logical bridge
* device, we proceed as if ip_route_input() succeeded. If it differs from the
* logical bridge port or if ip_route_output_key() fails we drop the packet.
*/
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct iphdr *iph = ip_hdr(skb);
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct rtable *rt;
int err;
nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
if (nf_bridge->pkt_otherhost) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
}
nf_bridge->in_prerouting = 0;
if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
/* If err equals -EHOSTUNREACH the error is due to a
* martian destination or due to the fact that
* forwarding is disabled. For most martian packets,
* ip_route_output_key() will fail. It won't fail for 2 types of
* martian destinations: loopback destinations and destination
* 0.0.0.0. In both cases the packet will be dropped because the
* destination is the loopback device and not the bridge. */
if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
goto free_skb;
rt = ip_route_output(net, iph->daddr, 0,
RT_TOS(iph->tos), 0);
if (!IS_ERR(rt)) {
/* - Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding. */
if (rt->dst.dev == dev) {
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
goto bridged_dnat;
}
ip_rt_put(rt);
}
free_skb:
kfree_skb(skb);
return 0;
} else {
if (skb_dst(skb)->dev == dev) {
bridged_dnat:
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_PRE_ROUTING,
net, sk, skb, skb->dev,
NULL,
br_nf_pre_routing_finish_bridge);
return 0;
}
ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
skb->pkt_type = PACKET_HOST;
}
} else {
rt = bridge_parent_rtable(nf_bridge->physindev);
if (!rt) {
kfree_skb(skb);
return 0;
}
skb_dst_drop(skb);
skb_dst_set_noref(skb, &rt->dst);
}
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
br_handle_frame_finish);
return 0;
}
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb,
const struct net_device *dev,
const struct net *net)
{
struct net_device *vlan, *br;
struct brnf_net *brnet = net_generic(net, brnf_net_id);
br = bridge_parent(dev);
if (brnet->pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
return br;
vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
skb_vlan_tag_get(skb) & VLAN_VID_MASK);
return vlan ? vlan : br;
}
/* Some common code for IPv4/IPv6 */
struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->pkt_otherhost = true;
}
nf_bridge->in_prerouting = 1;
nf_bridge->physindev = skb->dev;
skb->dev = brnf_get_logical_dev(skb, skb->dev, net);
if (skb->protocol == htons(ETH_P_8021Q))
nf_bridge->orig_proto = BRNF_PROTO_8021Q;
else if (skb->protocol == htons(ETH_P_PPP_SES))
nf_bridge->orig_proto = BRNF_PROTO_PPPOE;
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
return skb->dev;
}
/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
* Replicate the checks that IPv4 does on packet reception.
* Set skb->dev to the bridge device (i.e. parent of the
* receiving device) to make netfilter happy, the REDIRECT
* target in particular. Save the original destination IP
* address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_bridge_info *nf_bridge;
struct net_bridge_port *p;
struct net_bridge *br;
__u32 len = nf_bridge_encap_header_len(skb);
struct brnf_net *brnet;
if (unlikely(!pskb_may_pull(skb, len)))
return NF_DROP;
p = br_port_get_rcu(state->in);
if (p == NULL)
return NF_DROP;
br = p->br;
brnet = net_generic(state->net, brnf_net_id);
if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
is_pppoe_ipv6(skb, state->net)) {
if (!brnet->call_ip6tables &&
!br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
return NF_ACCEPT;
if (!ipv6_mod_enabled()) {
pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
return NF_DROP;
}
nf_bridge_pull_encap_header_rcsum(skb);
return br_nf_pre_routing_ipv6(priv, skb, state);
}
if (!brnet->call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES))
return NF_ACCEPT;
if (!IS_IP(skb) && !is_vlan_ip(skb, state->net) &&
!is_pppoe_ip(skb, state->net))
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
if (br_validate_ipv4(state->net, skb))
return NF_DROP;
if (!nf_bridge_alloc(skb))
return NF_DROP;
if (!setup_pre_routing(skb, state->net))
return NF_DROP;
nf_bridge = nf_bridge_info_get(skb);
nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
skb->protocol = htons(ETH_P_IP);
skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb->dev, NULL,
br_nf_pre_routing_finish);
return NF_STOLEN;
}
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct net_device *in;
if (!IS_ARP(skb) && !is_vlan_arp(skb, net)) {
if (skb->protocol == htons(ETH_P_IP))
nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
if (skb->protocol == htons(ETH_P_IPV6))
nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
in = nf_bridge->physindev;
if (nf_bridge->pkt_otherhost) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
}
nf_bridge_update_protocol(skb);
} else {
in = *((struct net_device **)(skb->cb));
}
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
br_forward_finish);
return 0;
}
/* This is the 'purely bridged' case. For IP, we pass the packet to
* netfilter with indev and outdev set to the bridge device,
* but we are still able to filter on the 'real' indev/outdev
* because of the physdev module. For ARP, indev and outdev are the
* bridge ports. */
static unsigned int br_nf_forward_ip(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_bridge_info *nf_bridge;
struct net_device *parent;
u_int8_t pf;
nf_bridge = nf_bridge_info_get(skb);
if (!nf_bridge)
return NF_ACCEPT;
/* Need exclusive nf_bridge_info since we might have multiple
* different physoutdevs. */
if (!nf_bridge_unshare(skb))
return NF_DROP;
nf_bridge = nf_bridge_info_get(skb);
if (!nf_bridge)
return NF_DROP;
parent = bridge_parent(state->out);
if (!parent)
return NF_DROP;
if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
is_pppoe_ip(skb, state->net))
pf = NFPROTO_IPV4;
else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
is_pppoe_ipv6(skb, state->net))
pf = NFPROTO_IPV6;
else
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->pkt_otherhost = true;
}
if (pf == NFPROTO_IPV4) {
if (br_validate_ipv4(state->net, skb))
return NF_DROP;
IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
}
if (pf == NFPROTO_IPV6) {
if (br_validate_ipv6(state->net, skb))
return NF_DROP;
IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
}
nf_bridge->physoutdev = skb->dev;
if (pf == NFPROTO_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
brnf_get_logical_dev(skb, state->in, state->net),
parent, br_nf_forward_finish);
return NF_STOLEN;
}
static unsigned int br_nf_forward_arp(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct net_bridge_port *p;
struct net_bridge *br;
struct net_device **d = (struct net_device **)(skb->cb);
struct brnf_net *brnet;
p = br_port_get_rcu(state->out);
if (p == NULL)
return NF_ACCEPT;
br = p->br;
brnet = net_generic(state->net, brnf_net_id);
if (!brnet->call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES))
return NF_ACCEPT;
if (!IS_ARP(skb)) {
if (!is_vlan_arp(skb, state->net))
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
}
if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
return NF_DROP;
if (arp_hdr(skb)->ar_pln != 4) {
if (is_vlan_arp(skb, state->net))
nf_bridge_push_encap_header(skb);
return NF_ACCEPT;
}
*d = state->in;
NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
state->in, state->out, br_nf_forward_finish);
return NF_STOLEN;
}
static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct brnf_frag_data *data;
int err;
data = this_cpu_ptr(&brnf_frag_data_storage);
err = skb_cow_head(skb, data->size);
if (err) {
kfree_skb(skb);
return 0;
}
if (data->vlan_proto)
__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);
skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
__skb_push(skb, data->encap_size);
nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(net, sk, skb);
}
static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
unsigned int mtu = ip_skb_dst_mtu(sk, skb);
struct iphdr *iph = ip_hdr(skb);
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
(IPCB(skb)->frag_max_size &&
IPCB(skb)->frag_max_size > mtu))) {
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
return ip_do_fragment(net, sk, skb, output);
}
static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
if (nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
return PPPOE_SES_HLEN;
return 0;
}
static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
unsigned int mtu, mtu_reserved;
mtu_reserved = nf_bridge_mtu_reduction(skb);
mtu = skb->dev->mtu;
if (nf_bridge->pkt_otherhost) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
}
if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
mtu = nf_bridge->frag_max_size;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(net, sk, skb);
}
/* This is wrong! We should preserve the original fragment
* boundaries by preserving frag_list rather than refragmenting.
*/
if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
skb->protocol == htons(ETH_P_IP)) {
struct brnf_frag_data *data;
if (br_validate_ipv4(net, skb))
goto drop;
IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
data = this_cpu_ptr(&brnf_frag_data_storage);
if (skb_vlan_tag_present(skb)) {
data->vlan_tci = skb->vlan_tci;
data->vlan_proto = skb->vlan_proto;
} else {
data->vlan_proto = 0;
}
data->encap_size = nf_bridge_encap_header_len(skb);
data->size = ETH_HLEN + data->encap_size;
skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
data->size);
return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
}
if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6)) {
const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
struct brnf_frag_data *data;
if (br_validate_ipv6(net, skb))
goto drop;
IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
data = this_cpu_ptr(&brnf_frag_data_storage);
data->encap_size = nf_bridge_encap_header_len(skb);
data->size = ETH_HLEN + data->encap_size;
skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
data->size);
if (v6ops)
return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
kfree_skb(skb);
return -EMSGSIZE;
}
nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(net, sk, skb);
drop:
kfree_skb(skb);
return 0;
}
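/* Worked example for the MTU handling above (numbers are illustrative):
 * with dev->mtu == 1500 and an IPv4 skb that conntrack reassembled from
 * fragments no larger than 1400 bytes, frag_max_size is 1400, so mtu is
 * lowered to 1400 and a 1500 byte non-GSO skb takes the
 * br_nf_ip_fragment() path; the refragmented pieces are then kept within
 * that size (subject to the known limitation noted in the comment above).
 */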
/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct net_device *realoutdev = bridge_parent(skb->dev);
u_int8_t pf;
/* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
* on a bridge, but was delivered locally and is now being routed:
*
* POST_ROUTING was already invoked from the ip stack.
*/
if (!nf_bridge || !nf_bridge->physoutdev)
return NF_ACCEPT;
if (!realoutdev)
return NF_DROP;
if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
is_pppoe_ip(skb, state->net))
pf = NFPROTO_IPV4;
else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
is_pppoe_ipv6(skb, state->net))
pf = NFPROTO_IPV6;
else
return NF_ACCEPT;
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->pkt_otherhost = true;
}
nf_bridge_pull_encap_header(skb);
if (pf == NFPROTO_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
NULL, realoutdev,
br_nf_dev_queue_xmit);
return NF_STOLEN;
}
/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
* for the second time. */
static unsigned int ip_sabotage_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
if (nf_bridge) {
if (nf_bridge->sabotage_in_done)
return NF_ACCEPT;
if (!nf_bridge->in_prerouting &&
!netif_is_l3_master(skb->dev) &&
!netif_is_l3_slave(skb->dev)) {
nf_bridge->sabotage_in_done = 1;
state->okfn(state->net, state->sk, skb);
return NF_STOLEN;
}
}
return NF_ACCEPT;
}
/* This is called when br_netfilter has called into iptables/netfilter,
* and DNAT has taken place on a bridge-forwarded packet.
*
* neigh->output has created a new MAC header, with local br0 MAC
* as saddr.
*
* This restores the original MAC saddr of the bridged packet
* before invoking bridge forward logic to transmit the packet.
*/
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb_pull(skb, ETH_HLEN);
nf_bridge->bridged_dnat = 0;
BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
nf_bridge->neigh_header,
ETH_HLEN - ETH_ALEN);
skb->dev = nf_bridge->physindev;
nf_bridge->physoutdev = NULL;
br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
}
static int br_nf_dev_xmit(struct sk_buff *skb)
{
const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
if (nf_bridge && nf_bridge->bridged_dnat) {
br_nf_pre_routing_finish_bridge_slow(skb);
return 1;
}
return 0;
}
static const struct nf_br_ops br_ops = {
.br_dev_xmit_hook = br_nf_dev_xmit,
};
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* br_dev_queue_push_xmit is called afterwards */
static const struct nf_hook_ops br_nf_ops[] = {
{
.hook = br_nf_pre_routing,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_forward_ip,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF - 1,
},
{
.hook = br_nf_forward_arp,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_post_routing,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_LAST,
},
{
.hook = ip_sabotage_in,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_FIRST,
},
{
.hook = ip_sabotage_in,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_FIRST,
},
};
static int brnf_device_event(struct notifier_block *unused, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct brnf_net *brnet;
struct net *net;
int ret;
if (event != NETDEV_REGISTER || !netif_is_bridge_master(dev))
return NOTIFY_DONE;
ASSERT_RTNL();
net = dev_net(dev);
brnet = net_generic(net, brnf_net_id);
if (brnet->enabled)
return NOTIFY_OK;
ret = nf_register_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
if (ret)
return NOTIFY_BAD;
brnet->enabled = true;
return NOTIFY_OK;
}
static struct notifier_block brnf_notifier __read_mostly = {
.notifier_call = brnf_device_event,
};
/* recursively invokes nf_hook_slow (again), skipping already-called
* hooks (< NF_BR_PRI_BRNF).
*
* Called with rcu read lock held.
*/
int br_nf_hook_thresh(unsigned int hook, struct net *net,
struct sock *sk, struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
int (*okfn)(struct net *, struct sock *,
struct sk_buff *))
{
const struct nf_hook_entries *e;
struct nf_hook_state state;
struct nf_hook_ops **ops;
unsigned int i;
int ret;
e = rcu_dereference(net->nf.hooks_bridge[hook]);
if (!e)
return okfn(net, sk, skb);
ops = nf_hook_entries_get_hook_ops(e);
for (i = 0; i < e->num_hook_entries; i++) {
/* These hooks have already been called */
if (ops[i]->priority < NF_BR_PRI_BRNF)
continue;
/* These hooks have not been called yet, run them. */
if (ops[i]->priority > NF_BR_PRI_BRNF)
break;
/* take a closer look at NF_BR_PRI_BRNF. */
if (ops[i]->hook == br_nf_pre_routing) {
/* This hook diverted the skb to this function,
* hooks after this have not been run yet.
*/
i++;
break;
}
}
nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
sk, net, okfn);
ret = nf_hook_slow(skb, &state, e, i);
if (ret == 1)
ret = okfn(net, sk, skb);
return ret;
}
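/* Illustrative walk of the loop above: with bridge hooks registered at
 * priorities below, equal to and above NF_BR_PRI_BRNF, the lower ones are
 * treated as already executed, the br_nf_pre_routing entry (when present
 * at NF_BR_PRI_BRNF) is stepped over since it is the hook that diverted
 * the skb here, and nf_hook_slow() resumes with the first entry that has
 * not run yet.
 */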
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write && *(int *)(ctl->data))
*(int *)(ctl->data) = 1;
return ret;
}
static struct ctl_table brnf_table[] = {
{
.procname = "bridge-nf-call-arptables",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-call-iptables",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-call-ip6tables",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-filter-vlan-tagged",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-filter-pppoe-tagged",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-pass-vlan-input-dev",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{ }
};
static inline void br_netfilter_sysctl_default(struct brnf_net *brnf)
{
brnf->call_iptables = 1;
brnf->call_ip6tables = 1;
brnf->call_arptables = 1;
brnf->filter_vlan_tagged = 0;
brnf->filter_pppoe_tagged = 0;
brnf->pass_vlan_indev = 0;
}
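/* For reference (illustrative): with brnf_table registered under
 * "net/bridge" (see br_netfilter_sysctl_init_net() below), these defaults
 * surface as /proc/sys/net/bridge/*, e.g. bridge-nf-call-iptables reads
 * back 1 in a freshly created netns while bridge-nf-filter-vlan-tagged
 * and bridge-nf-pass-vlan-input-dev read 0.
 */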
static int br_netfilter_sysctl_init_net(struct net *net)
{
struct ctl_table *table = brnf_table;
struct brnf_net *brnet;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(brnf_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
}
brnet = net_generic(net, brnf_net_id);
table[0].data = &brnet->call_arptables;
table[1].data = &brnet->call_iptables;
table[2].data = &brnet->call_ip6tables;
table[3].data = &brnet->filter_vlan_tagged;
table[4].data = &brnet->filter_pppoe_tagged;
table[5].data = &brnet->pass_vlan_indev;
br_netfilter_sysctl_default(brnet);
brnet->ctl_hdr = register_net_sysctl_sz(net, "net/bridge", table,
ARRAY_SIZE(brnf_table));
if (!brnet->ctl_hdr) {
if (!net_eq(net, &init_net))
kfree(table);
return -ENOMEM;
}
return 0;
}
static void br_netfilter_sysctl_exit_net(struct net *net,
struct brnf_net *brnet)
{
struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(brnet->ctl_hdr);
if (!net_eq(net, &init_net))
kfree(table);
}
static int __net_init brnf_init_net(struct net *net)
{
return br_netfilter_sysctl_init_net(net);
}
#endif
static void __net_exit brnf_exit_net(struct net *net)
{
struct brnf_net *brnet;
brnet = net_generic(net, brnf_net_id);
if (brnet->enabled) {
nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
brnet->enabled = false;
}
#ifdef CONFIG_SYSCTL
br_netfilter_sysctl_exit_net(net, brnet);
#endif
}
static struct pernet_operations brnf_net_ops __read_mostly = {
#ifdef CONFIG_SYSCTL
.init = brnf_init_net,
#endif
.exit = brnf_exit_net,
.id = &brnf_net_id,
.size = sizeof(struct brnf_net),
};
static int __init br_netfilter_init(void)
{
int ret;
ret = register_pernet_subsys(&brnf_net_ops);
if (ret < 0)
return ret;
ret = register_netdevice_notifier(&brnf_notifier);
if (ret < 0) {
unregister_pernet_subsys(&brnf_net_ops);
return ret;
}
RCU_INIT_POINTER(nf_br_ops, &br_ops);
printk(KERN_NOTICE "Bridge firewalling registered\n");
return 0;
}
static void __exit br_netfilter_fini(void)
{
RCU_INIT_POINTER(nf_br_ops, NULL);
unregister_netdevice_notifier(&brnf_notifier);
unregister_pernet_subsys(&brnf_net_ops);
}
module_init(br_netfilter_init);
module_exit(br_netfilter_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lennert Buytenhek <[email protected]>");
MODULE_AUTHOR("Bart De Schuymer <[email protected]>");
MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");
| linux-master | net/bridge/br_netfilter_hooks.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif
#include "br_private.h"
static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
unsigned long *timer)
{
*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
return !hlist_unhashed(&pmctx->ip4_rlist);
}
static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
return !hlist_unhashed(&pmctx->ip6_rlist);
#else
*timer = 0;
return false;
#endif
}
static size_t __br_rports_one_size(void)
{
return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
nla_total_size(sizeof(u8)) + /* MDBA_ROUTER_PATTR_TYPE */
nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
}
size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
struct net_bridge_mcast_port *pmctx;
size_t size = nla_total_size(0); /* MDBA_ROUTER */
rcu_read_lock();
hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
ip4_rlist)
size += __br_rports_one_size();
#if IS_ENABLED(CONFIG_IPV6)
hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
ip6_rlist)
size += __br_rports_one_size();
#endif
rcu_read_unlock();
return size;
}
int br_rports_fill_info(struct sk_buff *skb,
const struct net_bridge_mcast *brmctx)
{
u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
bool have_ip4_mc_rtr, have_ip6_mc_rtr;
unsigned long ip4_timer, ip6_timer;
struct nlattr *nest, *port_nest;
struct net_bridge_port *p;
if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
return 0;
nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
if (nest == NULL)
return -EMSGSIZE;
list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
struct net_bridge_mcast_port *pmctx;
if (vid) {
struct net_bridge_vlan *v;
v = br_vlan_find(nbp_vlan_group(p), vid);
if (!v)
continue;
pmctx = &v->port_mcast_ctx;
} else {
pmctx = &p->multicast_ctx;
}
have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);
if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
continue;
port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
if (!port_nest)
goto fail;
if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
max(ip4_timer, ip6_timer)) ||
nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
p->multicast_ctx.multicast_router) ||
(have_ip4_mc_rtr &&
nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
ip4_timer)) ||
(have_ip6_mc_rtr &&
nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
ip6_timer)) ||
(vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
nla_nest_cancel(skb, port_nest);
goto fail;
}
nla_nest_end(skb, port_nest);
}
nla_nest_end(skb, nest);
return 0;
fail:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
e->state = flags & MDB_PG_FLAGS_PERMANENT;
e->flags = 0;
if (flags & MDB_PG_FLAGS_OFFLOAD)
e->flags |= MDB_FLAGS_OFFLOAD;
if (flags & MDB_PG_FLAGS_FAST_LEAVE)
e->flags |= MDB_FLAGS_FAST_LEAVE;
if (flags & MDB_PG_FLAGS_STAR_EXCL)
e->flags |= MDB_FLAGS_STAR_EXCL;
if (flags & MDB_PG_FLAGS_BLOCKED)
e->flags |= MDB_FLAGS_BLOCKED;
}
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
struct nlattr **mdb_attrs)
{
memset(ip, 0, sizeof(struct br_ip));
ip->vid = entry->vid;
ip->proto = entry->addr.proto;
switch (ip->proto) {
case htons(ETH_P_IP):
ip->dst.ip4 = entry->addr.u.ip4;
if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
ip->dst.ip6 = entry->addr.u.ip6;
if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
break;
#endif
default:
ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
}
}
static int __mdb_fill_srcs(struct sk_buff *skb,
struct net_bridge_port_group *p)
{
struct net_bridge_group_src *ent;
struct nlattr *nest, *nest_ent;
if (hlist_empty(&p->src_list))
return 0;
nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
if (!nest)
return -EMSGSIZE;
hlist_for_each_entry_rcu(ent, &p->src_list, node,
lockdep_is_held(&p->key.port->br->multicast_lock)) {
nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
if (!nest_ent)
goto out_cancel_err;
switch (ent->addr.proto) {
case htons(ETH_P_IP):
if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
ent->addr.src.ip4)) {
nla_nest_cancel(skb, nest_ent);
goto out_cancel_err;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
&ent->addr.src.ip6)) {
nla_nest_cancel(skb, nest_ent);
goto out_cancel_err;
}
break;
#endif
default:
nla_nest_cancel(skb, nest_ent);
continue;
}
if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
br_timer_value(&ent->timer))) {
nla_nest_cancel(skb, nest_ent);
goto out_cancel_err;
}
nla_nest_end(skb, nest_ent);
}
nla_nest_end(skb, nest);
return 0;
out_cancel_err:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int __mdb_fill_info(struct sk_buff *skb,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *p)
{
bool dump_srcs_mode = false;
struct timer_list *mtimer;
struct nlattr *nest_ent;
struct br_mdb_entry e;
u8 flags = 0;
int ifindex;
memset(&e, 0, sizeof(e));
if (p) {
ifindex = p->key.port->dev->ifindex;
mtimer = &p->timer;
flags = p->flags;
} else {
ifindex = mp->br->dev->ifindex;
mtimer = &mp->timer;
}
__mdb_entry_fill_flags(&e, flags);
e.ifindex = ifindex;
e.vid = mp->addr.vid;
if (mp->addr.proto == htons(ETH_P_IP)) {
e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
} else {
ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
e.state = MDB_PERMANENT;
}
e.addr.proto = mp->addr.proto;
nest_ent = nla_nest_start_noflag(skb,
MDBA_MDB_ENTRY_INFO);
if (!nest_ent)
return -EMSGSIZE;
if (nla_put_nohdr(skb, sizeof(e), &e) ||
nla_put_u32(skb,
MDBA_MDB_EATTR_TIMER,
br_timer_value(mtimer)))
goto nest_err;
switch (mp->addr.proto) {
case htons(ETH_P_IP):
dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
if (mp->addr.src.ip4) {
if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
mp->addr.src.ip4))
goto nest_err;
break;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
if (!ipv6_addr_any(&mp->addr.src.ip6)) {
if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
&mp->addr.src.ip6))
goto nest_err;
break;
}
break;
#endif
default:
ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
}
if (p) {
if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
goto nest_err;
if (dump_srcs_mode &&
(__mdb_fill_srcs(skb, p) ||
nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
p->filter_mode)))
goto nest_err;
}
nla_nest_end(skb, nest_ent);
return 0;
nest_err:
nla_nest_cancel(skb, nest_ent);
return -EMSGSIZE;
}
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mdb_entry *mp;
struct nlattr *nest, *nest2;
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return 0;
nest = nla_nest_start_noflag(skb, MDBA_MDB);
if (nest == NULL)
return -EMSGSIZE;
hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
if (idx < s_idx)
goto skip;
nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
if (!nest2) {
err = -EMSGSIZE;
break;
}
if (!s_pidx && mp->host_joined) {
err = __mdb_fill_info(skb, mp, NULL);
if (err) {
nla_nest_cancel(skb, nest2);
break;
}
}
for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
pp = &p->next) {
if (!p->key.port)
continue;
if (pidx < s_pidx)
goto skip_pg;
err = __mdb_fill_info(skb, mp, p);
if (err) {
nla_nest_end(skb, nest2);
goto out;
}
skip_pg:
pidx++;
}
pidx = 0;
s_pidx = 0;
nla_nest_end(skb, nest2);
skip:
idx++;
}
out:
cb->args[1] = idx;
cb->args[2] = pidx;
nla_nest_end(skb, nest);
return err;
}
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
struct netlink_callback *cb)
{
struct net_bridge *br = netdev_priv(dev);
struct br_port_msg *bpm;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->ifindex = dev->ifindex;
rcu_read_lock();
err = br_mdb_fill_info(skb, cb, dev);
if (err)
goto out;
err = br_rports_fill_info(skb, &br->multicast_ctx);
if (err)
goto out;
out:
rcu_read_unlock();
nlmsg_end(skb, nlh);
return err;
}
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
struct net_device *dev,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
int type)
{
struct nlmsghdr *nlh;
struct br_port_msg *bpm;
struct nlattr *nest, *nest2;
nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->family = AF_BRIDGE;
bpm->ifindex = dev->ifindex;
nest = nla_nest_start_noflag(skb, MDBA_MDB);
if (nest == NULL)
goto cancel;
nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
if (nest2 == NULL)
goto end;
if (__mdb_fill_info(skb, mp, pg))
goto end;
nla_nest_end(skb, nest2);
nla_nest_end(skb, nest);
nlmsg_end(skb, nlh);
return 0;
end:
nla_nest_end(skb, nest);
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
nla_total_size(sizeof(struct br_mdb_entry)) +
nla_total_size(sizeof(u32));
struct net_bridge_group_src *ent;
size_t addr_size = 0;
if (!pg)
goto out;
/* MDBA_MDB_EATTR_RTPROT */
nlmsg_size += nla_total_size(sizeof(u8));
switch (pg->key.addr.proto) {
case htons(ETH_P_IP):
/* MDBA_MDB_EATTR_SOURCE */
if (pg->key.addr.src.ip4)
nlmsg_size += nla_total_size(sizeof(__be32));
if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
goto out;
addr_size = sizeof(__be32);
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
/* MDBA_MDB_EATTR_SOURCE */
if (!ipv6_addr_any(&pg->key.addr.src.ip6))
nlmsg_size += nla_total_size(sizeof(struct in6_addr));
if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
goto out;
addr_size = sizeof(struct in6_addr);
break;
#endif
}
/* MDBA_MDB_EATTR_GROUP_MODE */
nlmsg_size += nla_total_size(sizeof(u8));
/* MDBA_MDB_EATTR_SRC_LIST nested attr */
if (!hlist_empty(&pg->src_list))
nlmsg_size += nla_total_size(0);
hlist_for_each_entry(ent, &pg->src_list, node) {
/* MDBA_MDB_SRCLIST_ENTRY nested attr +
* MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
*/
nlmsg_size += nla_total_size(0) +
nla_total_size(addr_size) +
nla_total_size(sizeof(u32));
}
out:
return nlmsg_size;
}
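/* Worked example of the sizing above (illustrative): an IGMPv3 IPv4 (S,G)
 * port group entry with one source adds, on top of the base br_port_msg,
 * br_mdb_entry and timer attributes, a u8 for MDBA_MDB_EATTR_RTPROT, a
 * __be32 for MDBA_MDB_EATTR_SOURCE, a u8 for MDBA_MDB_EATTR_GROUP_MODE,
 * one empty nest header for MDBA_MDB_EATTR_SRC_LIST and, per source, a
 * nest header plus a 4 byte address and a u32 timer. The result is only
 * used to size the notification skb in br_mdb_notify().
 */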
void br_mdb_notify(struct net_device *dev,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
int type)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
br_switchdev_mdb_notify(dev, mp, pg, type);
skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
if (!skb)
goto errout;
err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
if (err < 0) {
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
struct net_device *dev,
int ifindex, u16 vid, u32 pid,
u32 seq, int type, unsigned int flags)
{
struct nlattr *nest, *port_nest;
struct br_port_msg *bpm;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->family = AF_BRIDGE;
bpm->ifindex = dev->ifindex;
nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
if (!nest)
goto cancel;
port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
if (!port_nest)
goto end;
if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
nla_nest_cancel(skb, port_nest);
goto end;
}
if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
nla_nest_cancel(skb, port_nest);
goto end;
}
nla_nest_end(skb, port_nest);
nla_nest_end(skb, nest);
nlmsg_end(skb, nlh);
return 0;
end:
nla_nest_end(skb, nest);
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static inline size_t rtnl_rtr_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct br_port_msg))
+ nla_total_size(sizeof(__u32))
+ nla_total_size(sizeof(u16));
}
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
int type)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
int ifindex;
u16 vid;
ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
0;
skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
if (!skb)
goto errout;
err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
NTF_SELF);
if (err < 0) {
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
sizeof(struct in6_addr)),
};
static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};
static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
sizeof(struct in6_addr)),
[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
MCAST_INCLUDE),
[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
struct netlink_ext_ack *extack)
{
switch (proto) {
case htons(ETH_P_IP):
if (nla_len(attr) != sizeof(struct in_addr)) {
NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
return false;
}
if (ipv4_is_multicast(nla_get_in_addr(attr))) {
NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
return false;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6): {
struct in6_addr src;
if (nla_len(attr) != sizeof(struct in6_addr)) {
NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
return false;
}
src = nla_get_in6_addr(attr);
if (ipv6_addr_is_multicast(&src)) {
NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
return false;
}
break;
}
#endif
default:
NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
return false;
}
return true;
}
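/* Pick the multicast context used for timers: the bridge-wide context
* when VLAN snooping is disabled, otherwise the per-VLAN context of
* the entry's VID. Returns NULL (with extack set) on error.
*/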
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
const struct br_mdb_entry *entry,
struct netlink_ext_ack *extack)
{
struct net_bridge_mcast *brmctx = NULL;
struct net_bridge_vlan *v;
if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
brmctx = &br->multicast_ctx;
goto out;
}
if (!entry->vid) {
NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
goto out;
}
v = br_vlan_find(br_vlan_group(br), entry->vid);
if (!v) {
NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
goto out;
}
if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
goto out;
}
brmctx = &v->br_mcast_ctx;
out:
return brmctx;
}
static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
struct net_bridge_mcast *brmctx,
unsigned char flags)
{
unsigned long now = jiffies;
pg->flags = flags;
pg->rt_protocol = cfg->rt_protocol;
if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
mod_timer(&pg->timer,
now + brmctx->multicast_membership_interval);
else
del_timer(&pg->timer);
br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);
return 0;
}
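/* Add (or, with NLM_F_REPLACE, update) a port group entry for an
* (S, G) address. The port list is kept ordered by port pointer
* value; existing (*, G) EXCLUDE ports are then attached to the new
* (S, G) entry so replication stays correct.
*/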
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
struct net_bridge_mdb_entry *mp,
struct net_bridge_mcast *brmctx,
unsigned char flags,
struct netlink_ext_ack *extack)
{
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port_group *p;
unsigned long now = jiffies;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, cfg->br)) != NULL;
pp = &p->next) {
if (p->key.port == cfg->p) {
if (!(cfg->nlflags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
return -EEXIST;
}
return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
flags);
}
if ((unsigned long)p->key.port < (unsigned long)cfg->p)
break;
}
p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
MCAST_INCLUDE, cfg->rt_protocol, extack);
if (unlikely(!p))
return -ENOMEM;
rcu_assign_pointer(*pp, p);
if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
mod_timer(&p->timer,
now + brmctx->multicast_membership_interval);
br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
* proper replication.
*/
if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
struct net_bridge_mdb_entry *star_mp;
struct br_ip star_group;
star_group = p->key.addr;
memset(&star_group.src, 0, sizeof(star_group.src));
star_mp = br_mdb_ip_get(cfg->br, &star_group);
if (star_mp)
br_multicast_sg_add_exclude_ports(star_mp, p);
}
return 0;
}
static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
struct br_ip *src_ip,
struct net_bridge_mcast *brmctx,
struct netlink_ext_ack *extack)
{
struct net_bridge_mdb_entry *sgmp;
struct br_mdb_config sg_cfg;
struct br_ip sg_ip;
u8 flags = 0;
sg_ip = cfg->group;
sg_ip.src = src_ip->src;
sgmp = br_multicast_new_group(cfg->br, &sg_ip);
if (IS_ERR(sgmp)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
return PTR_ERR(sgmp);
}
if (cfg->entry->state == MDB_PERMANENT)
flags |= MDB_PG_FLAGS_PERMANENT;
if (cfg->filter_mode == MCAST_EXCLUDE)
flags |= MDB_PG_FLAGS_BLOCKED;
memset(&sg_cfg, 0, sizeof(sg_cfg));
sg_cfg.br = cfg->br;
sg_cfg.p = cfg->p;
sg_cfg.entry = cfg->entry;
sg_cfg.group = sg_ip;
sg_cfg.src_entry = true;
sg_cfg.filter_mode = MCAST_INCLUDE;
sg_cfg.rt_protocol = cfg->rt_protocol;
sg_cfg.nlflags = cfg->nlflags;
return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}
static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
struct net_bridge_port_group *pg,
struct net_bridge_mcast *brmctx,
struct br_mdb_src_entry *src,
struct netlink_ext_ack *extack)
{
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
int err;
ent = br_multicast_find_group_src(pg, &src->addr);
if (!ent) {
ent = br_multicast_new_group_src(pg, &src->addr);
if (!ent) {
NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
return -ENOSPC;
}
} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
return -EEXIST;
}
if (cfg->filter_mode == MCAST_INCLUDE &&
cfg->entry->state == MDB_TEMPORARY)
mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
else
del_timer(&ent->timer);
/* Install a (S, G) forwarding entry for the source. */
err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
if (err)
goto err_del_sg;
ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;
return 0;
err_del_sg:
__br_multicast_del_group_src(ent);
return err;
}
static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
struct br_mdb_src_entry *src)
{
struct net_bridge_group_src *ent;
ent = br_multicast_find_group_src(pg, &src->addr);
if (WARN_ON_ONCE(!ent))
return;
br_multicast_del_group_src(ent, false);
}
static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
struct net_bridge_port_group *pg,
struct net_bridge_mcast *brmctx,
struct netlink_ext_ack *extack)
{
int i, err;
for (i = 0; i < cfg->num_src_entries; i++) {
err = br_mdb_add_group_src(cfg, pg, brmctx,
&cfg->src_entries[i], extack);
if (err)
goto err_del_group_srcs;
}
return 0;
err_del_group_srcs:
for (i--; i >= 0; i--)
br_mdb_del_group_src(pg, &cfg->src_entries[i]);
return err;
}
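/* Replace the source list of a (*, G) port group: mark all current
* sources for deletion, add the new ones, then drop whichever old
* sources were not re-added.
*/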
static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
struct net_bridge_port_group *pg,
struct net_bridge_mcast *brmctx,
struct netlink_ext_ack *extack)
{
struct net_bridge_group_src *ent;
struct hlist_node *tmp;
int err;
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags |= BR_SGRP_F_DELETE;
err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
if (err)
goto err_clear_delete;
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
if (ent->flags & BR_SGRP_F_DELETE)
br_multicast_del_group_src(ent, false);
}
return 0;
err_clear_delete:
hlist_for_each_entry(ent, &pg->src_list, node)
ent->flags &= ~BR_SGRP_F_DELETE;
return err;
}
static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
struct net_bridge_mcast *brmctx,
unsigned char flags,
struct netlink_ext_ack *extack)
{
unsigned long now = jiffies;
int err;
err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
if (err)
return err;
pg->flags = flags;
pg->filter_mode = cfg->filter_mode;
pg->rt_protocol = cfg->rt_protocol;
if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
cfg->filter_mode == MCAST_EXCLUDE)
mod_timer(&pg->timer,
now + brmctx->multicast_membership_interval);
else
del_timer(&pg->timer);
br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);
if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
br_multicast_star_g_handle_mode(pg, cfg->filter_mode);
return 0;
}
static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
struct net_bridge_mdb_entry *mp,
struct net_bridge_mcast *brmctx,
unsigned char flags,
struct netlink_ext_ack *extack)
{
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port_group *p;
unsigned long now = jiffies;
int err;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, cfg->br)) != NULL;
pp = &p->next) {
if (p->key.port == cfg->p) {
if (!(cfg->nlflags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
return -EEXIST;
}
return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
flags, extack);
}
if ((unsigned long)p->key.port < (unsigned long)cfg->p)
break;
}
p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
cfg->filter_mode, cfg->rt_protocol,
extack);
if (unlikely(!p))
return -ENOMEM;
err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
if (err)
goto err_del_port_group;
rcu_assign_pointer(*pp, p);
if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
cfg->filter_mode == MCAST_EXCLUDE)
mod_timer(&p->timer,
now + brmctx->multicast_membership_interval);
br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
/* If we are adding a new EXCLUDE port group (*, G), it needs to be
* also added to all (S, G) entries for proper replication.
*/
if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
cfg->filter_mode == MCAST_EXCLUDE)
br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
return 0;
err_del_port_group:
br_multicast_del_port_group(p);
return err;
}
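/* Resolve the multicast context, create the MDB entry if needed and
* dispatch to the host-join, (*, G) or (S, G) handler.
*/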
static int br_mdb_add_group(const struct br_mdb_config *cfg,
struct netlink_ext_ack *extack)
{
struct br_mdb_entry *entry = cfg->entry;
struct net_bridge_port *port = cfg->p;
struct net_bridge_mdb_entry *mp;
struct net_bridge *br = cfg->br;
struct net_bridge_mcast *brmctx;
struct br_ip group = cfg->group;
unsigned char flags = 0;
brmctx = __br_mdb_choose_context(br, entry, extack);
if (!brmctx)
return -EINVAL;
mp = br_multicast_new_group(br, &group);
if (IS_ERR(mp))
return PTR_ERR(mp);
/* host join */
if (!port) {
if (mp->host_joined) {
NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
return -EEXIST;
}
br_multicast_host_join(brmctx, mp, false);
br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);
return 0;
}
if (entry->state == MDB_PERMANENT)
flags |= MDB_PG_FLAGS_PERMANENT;
if (br_multicast_is_star_g(&group))
return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
else
return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}
static int __br_mdb_add(const struct br_mdb_config *cfg,
struct netlink_ext_ack *extack)
{
int ret;
spin_lock_bh(&cfg->br->multicast_lock);
ret = br_mdb_add_group(cfg, extack);
spin_unlock_bh(&cfg->br->multicast_lock);
return ret;
}
static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
struct br_mdb_src_entry *src,
__be16 proto,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
int err;
err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
br_mdbe_src_list_entry_pol, extack);
if (err)
return err;
if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
return -EINVAL;
if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
return -EINVAL;
src->addr.proto = proto;
nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
nla_len(tb[MDBE_SRCATTR_ADDRESS]));
return 0;
}
static int br_mdb_config_src_list_init(struct nlattr *src_list,
struct br_mdb_config *cfg,
struct netlink_ext_ack *extack)
{
struct nlattr *src_entry;
int rem, err;
int i = 0;
nla_for_each_nested(src_entry, src_list, rem)
cfg->num_src_entries++;
if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
PG_SRC_ENT_LIMIT - 1);
return -EINVAL;
}
cfg->src_entries = kcalloc(cfg->num_src_entries,
sizeof(struct br_mdb_src_entry), GFP_KERNEL);
if (!cfg->src_entries)
return -ENOMEM;
nla_for_each_nested(src_entry, src_list, rem) {
err = br_mdb_config_src_entry_init(src_entry,
&cfg->src_entries[i],
cfg->entry->addr.proto,
extack);
if (err)
goto err_src_entry_init;
i++;
}
return 0;
err_src_entry_init:
kfree(cfg->src_entries);
return err;
}
static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
kfree(cfg->src_entries);
}
static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
struct br_mdb_config *cfg,
struct netlink_ext_ack *extack)
{
struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
int err;
err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
br_mdbe_attrs_pol, extack);
if (err)
return err;
if (mdb_attrs[MDBE_ATTR_SOURCE] &&
!is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
cfg->entry->addr.proto, extack))
return -EINVAL;
__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);
if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
if (!cfg->p) {
NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
return -EINVAL;
}
if (!br_multicast_is_star_g(&cfg->group)) {
NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
return -EINVAL;
}
cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
} else {
cfg->filter_mode = MCAST_EXCLUDE;
}
if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
if (!cfg->p) {
NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
return -EINVAL;
}
if (!br_multicast_is_star_g(&cfg->group)) {
NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
return -EINVAL;
}
if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
return -EINVAL;
}
err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
cfg, extack);
if (err)
return err;
}
if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
return -EINVAL;
}
if (mdb_attrs[MDBE_ATTR_RTPROT]) {
if (!cfg->p) {
NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
return -EINVAL;
}
cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
}
return 0;
}
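/* Parse an RTM_NEWMDB/RTM_DELMDB request into a br_mdb_config:
* validate the bridge state, resolve the target port (if any) and
* fill in the group address, filter mode, routing protocol and
* source list.
*/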
static int br_mdb_config_init(struct br_mdb_config *cfg, struct net_device *dev,
struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack)
{
struct net *net = dev_net(dev);
memset(cfg, 0, sizeof(*cfg));
cfg->filter_mode = MCAST_EXCLUDE;
cfg->rt_protocol = RTPROT_STATIC;
cfg->nlflags = nlmsg_flags;
cfg->br = netdev_priv(dev);
if (!netif_running(cfg->br->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
return -EINVAL;
}
if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
return -EINVAL;
}
cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);
if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
struct net_device *pdev;
pdev = __dev_get_by_index(net, cfg->entry->ifindex);
if (!pdev) {
NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
return -ENODEV;
}
cfg->p = br_port_get_rtnl(pdev);
if (!cfg->p) {
NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
return -EINVAL;
}
if (cfg->p->br != cfg->br) {
NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
return -EINVAL;
}
}
if (cfg->entry->addr.proto == htons(ETH_P_IP) &&
ipv4_is_zeronet(cfg->entry->addr.u.ip4)) {
NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address 0.0.0.0 is not allowed");
return -EINVAL;
}
if (tb[MDBA_SET_ENTRY_ATTRS])
return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
extack);
else
__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);
return 0;
}
static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
br_mdb_config_src_list_fini(cfg);
}
int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
struct br_mdb_config cfg;
int err;
err = br_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
if (err)
return err;
err = -EINVAL;
/* host join errors which can happen before creating the group */
if (!cfg.p && !br_group_is_l2(&cfg.group)) {
/* don't allow any flags for host-joined IP groups */
if (cfg.entry->state) {
NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
goto out;
}
if (!br_multicast_is_star_g(&cfg.group)) {
NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
goto out;
}
}
if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
goto out;
}
if (cfg.p) {
if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
goto out;
}
vg = nbp_vlan_group(cfg.p);
} else {
vg = br_vlan_group(cfg.br);
}
/* If VLAN filtering is enabled and no VLAN is specified,
* install the MDB entry on all VLANs configured on the port.
*/
if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
cfg.entry->vid = v->vid;
cfg.group.vid = v->vid;
err = __br_mdb_add(&cfg, extack);
if (err)
break;
}
} else {
err = __br_mdb_add(&cfg, extack);
}
out:
br_mdb_config_fini(&cfg);
return err;
}
static int __br_mdb_del(const struct br_mdb_config *cfg)
{
struct br_mdb_entry *entry = cfg->entry;
struct net_bridge *br = cfg->br;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip ip = cfg->group;
int err = -EINVAL;
spin_lock_bh(&br->multicast_lock);
mp = br_mdb_ip_get(br, &ip);
if (!mp)
goto unlock;
/* host leave */
if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
br_multicast_host_leave(mp, false);
err = 0;
br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
if (!mp->ports && netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
goto unlock;
}
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
continue;
br_multicast_del_pg(mp, p, pp);
err = 0;
break;
}
unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
}
int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
struct br_mdb_config cfg;
int err;
err = br_mdb_config_init(&cfg, dev, tb, 0, extack);
if (err)
return err;
if (cfg.p)
vg = nbp_vlan_group(cfg.p);
else
vg = br_vlan_group(cfg.br);
/* If VLAN filtering is enabled and no VLAN is specified,
* delete the MDB entry on all VLANs configured on the port.
*/
if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
cfg.entry->vid = v->vid;
cfg.group.vid = v->vid;
err = __br_mdb_del(&cfg);
}
} else {
err = __br_mdb_del(&cfg);
}
br_mdb_config_fini(&cfg);
return err;
}
| linux-master | net/bridge/br_mdb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 802_3
*
* Author:
* Chris Vitale [email protected]
*
* May 2003
*
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/skbuff.h>
#include <uapi/linux/netfilter_bridge/ebt_802_3.h>
static struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
{
return (struct ebt_802_3_hdr *)skb_mac_header(skb);
}
static bool
ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_802_3_info *info = par->matchinfo;
const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb);
__be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type;
if (info->bitmask & EBT_802_3_SAP) {
if (NF_INVF(info, EBT_802_3_SAP, info->sap != hdr->llc.ui.ssap))
return false;
if (NF_INVF(info, EBT_802_3_SAP, info->sap != hdr->llc.ui.dsap))
return false;
}
if (info->bitmask & EBT_802_3_TYPE) {
if (!(hdr->llc.ui.dsap == CHECK_TYPE && hdr->llc.ui.ssap == CHECK_TYPE))
return false;
if (NF_INVF(info, EBT_802_3_TYPE, info->type != type))
return false;
}
return true;
}
static int ebt_802_3_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_802_3_info *info = par->matchinfo;
if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK)
return -EINVAL;
return 0;
}
static struct xt_match ebt_802_3_mt_reg __read_mostly = {
.name = "802_3",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_802_3_mt,
.checkentry = ebt_802_3_mt_check,
.matchsize = sizeof(struct ebt_802_3_info),
.me = THIS_MODULE,
};
static int __init ebt_802_3_init(void)
{
return xt_register_match(&ebt_802_3_mt_reg);
}
static void __exit ebt_802_3_fini(void)
{
xt_unregister_match(&ebt_802_3_mt_reg);
}
module_init(ebt_802_3_init);
module_exit(ebt_802_3_fini);
MODULE_DESCRIPTION("Ebtables: DSAP/SSAP field and SNAP type matching");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_802_3.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/sysctl.h>
#include <net/route.h>
#include <net/ip.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_bridge.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include "../br_private.h"
/* Best effort variant of ip_do_fragment which preserves geometry, unless skbuff
* has been linearized or cloned.
*/
static int nf_br_ip_fragment(struct net *net, struct sock *sk,
struct sk_buff *skb,
struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
const struct nf_bridge_frag_data *data,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
bool mono_delivery_time = skb->mono_delivery_time;
unsigned int hlen, ll_rs, mtu;
ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
struct iphdr *iph;
int err;
/* For offloaded checksums, complete the checksum before fragmentation. */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_checksum_help(skb)))
goto blackhole;
iph = ip_hdr(skb);
/*
* Setup starting values
*/
hlen = iph->ihl * 4;
frag_max_size -= hlen;
ll_rs = LL_RESERVED_SPACE(skb->dev);
mtu = skb->dev->mtu;
if (skb_has_frag_list(skb)) {
unsigned int first_len = skb_pagelen(skb);
struct ip_fraglist_iter iter;
struct sk_buff *frag;
if (first_len - hlen > mtu ||
skb_headroom(skb) < ll_rs)
goto blackhole;
if (skb_cloned(skb))
goto slow_path;
skb_walk_frags(skb, frag) {
if (frag->len > mtu ||
skb_headroom(frag) < hlen + ll_rs)
goto blackhole;
if (skb_shared(frag))
goto slow_path;
}
ip_fraglist_init(skb, iph, hlen, &iter);
for (;;) {
if (iter.frag)
ip_fraglist_prepare(skb, &iter);
skb_set_delivery_time(skb, tstamp, mono_delivery_time);
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
skb = ip_fraglist_next(&iter);
}
if (!err)
return 0;
kfree_skb_list(iter.frag);
return err;
}
slow_path:
/* This is a linearized skbuff, so the original geometry is lost for us.
* It may also be a cloned skbuff; we could preserve the geometry for
* the copies, but that is probably not worth the effort.
*/
ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
while (state.left > 0) {
struct sk_buff *skb2;
skb2 = ip_frag_next(skb, &state);
if (IS_ERR(skb2)) {
err = PTR_ERR(skb2);
goto blackhole;
}
skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
}
consume_skb(skb);
return err;
blackhole:
kfree_skb(skb);
return 0;
}
/* ip_defrag() expects IPCB() in place. */
static void br_skb_cb_save(struct sk_buff *skb, struct br_input_skb_cb *cb,
size_t inet_skb_parm_size)
{
memcpy(cb, skb->cb, sizeof(*cb));
memset(skb->cb, 0, inet_skb_parm_size);
}
static void br_skb_cb_restore(struct sk_buff *skb,
const struct br_input_skb_cb *cb,
u16 fragsz)
{
memcpy(skb->cb, cb, sizeof(*cb));
BR_INPUT_SKB_CB(skb)->frag_max_size = fragsz;
}
static unsigned int nf_ct_br_defrag4(struct sk_buff *skb,
const struct nf_hook_state *state)
{
u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
enum ip_conntrack_info ctinfo;
struct br_input_skb_cb cb;
const struct nf_conn *ct;
int err;
if (!ip_is_fragment(ip_hdr(skb)))
return NF_ACCEPT;
ct = nf_ct_get(skb, &ctinfo);
if (ct)
zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
br_skb_cb_save(skb, &cb, sizeof(struct inet_skb_parm));
local_bh_disable();
err = ip_defrag(state->net, skb,
IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
local_bh_enable();
if (!err) {
br_skb_cb_restore(skb, &cb, IPCB(skb)->frag_max_size);
skb->ignore_df = 1;
return NF_ACCEPT;
}
return NF_STOLEN;
}
static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
const struct nf_hook_state *state)
{
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
enum ip_conntrack_info ctinfo;
struct br_input_skb_cb cb;
const struct nf_conn *ct;
int err;
ct = nf_ct_get(skb, &ctinfo);
if (ct)
zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
err = nf_ct_frag6_gather(state->net, skb,
IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
/* queued */
if (err == -EINPROGRESS)
return NF_STOLEN;
br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
return err == 0 ? NF_ACCEPT : NF_DROP;
#else
return NF_ACCEPT;
#endif
}
static int nf_ct_br_ip_check(const struct sk_buff *skb)
{
const struct iphdr *iph;
int nhoff, len;
nhoff = skb_network_offset(skb);
iph = ip_hdr(skb);
if (iph->ihl < 5 ||
iph->version != 4)
return -1;
len = skb_ip_totlen(skb);
if (skb->len < nhoff + len ||
len < (iph->ihl * 4))
return -1;
return 0;
}
static int nf_ct_br_ipv6_check(const struct sk_buff *skb)
{
const struct ipv6hdr *hdr;
int nhoff, len;
nhoff = skb_network_offset(skb);
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
return -1;
len = ntohs(hdr->payload_len) + sizeof(struct ipv6hdr) + nhoff;
if (skb->len < len)
return -1;
return 0;
}
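/* Bridge PRE_ROUTING conntrack hook: skip packets that are already
* tracked, sanity-check and trim the IPv4/IPv6 header, defragment if
* necessary and hand the packet to nf_conntrack_in() with the
* matching address family.
*/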
static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_hook_state bridge_state = *state;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
u32 len;
int ret;
ct = nf_ct_get(skb, &ctinfo);
if ((ct && !nf_ct_is_template(ct)) ||
ctinfo == IP_CT_UNTRACKED)
return NF_ACCEPT;
switch (skb->protocol) {
case htons(ETH_P_IP):
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return NF_ACCEPT;
len = skb_ip_totlen(skb);
if (pskb_trim_rcsum(skb, len))
return NF_ACCEPT;
if (nf_ct_br_ip_check(skb))
return NF_ACCEPT;
bridge_state.pf = NFPROTO_IPV4;
ret = nf_ct_br_defrag4(skb, &bridge_state);
break;
case htons(ETH_P_IPV6):
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return NF_ACCEPT;
len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
if (pskb_trim_rcsum(skb, len))
return NF_ACCEPT;
if (nf_ct_br_ipv6_check(skb))
return NF_ACCEPT;
bridge_state.pf = NFPROTO_IPV6;
ret = nf_ct_br_defrag6(skb, &bridge_state);
break;
default:
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
return NF_ACCEPT;
}
if (ret != NF_ACCEPT)
return ret;
return nf_conntrack_in(skb, &bridge_state);
}
static void nf_ct_bridge_frag_save(struct sk_buff *skb,
struct nf_bridge_frag_data *data)
{
if (skb_vlan_tag_present(skb)) {
data->vlan_present = true;
data->vlan_tci = skb->vlan_tci;
data->vlan_proto = skb->vlan_proto;
} else {
data->vlan_present = false;
}
skb_copy_from_linear_data_offset(skb, -ETH_HLEN, data->mac, ETH_HLEN);
}
static unsigned int
nf_ct_bridge_refrag(struct sk_buff *skb, const struct nf_hook_state *state,
int (*output)(struct net *, struct sock *sk,
const struct nf_bridge_frag_data *data,
struct sk_buff *))
{
struct nf_bridge_frag_data data;
if (!BR_INPUT_SKB_CB(skb)->frag_max_size)
return NF_ACCEPT;
nf_ct_bridge_frag_save(skb, &data);
switch (skb->protocol) {
case htons(ETH_P_IP):
nf_br_ip_fragment(state->net, state->sk, skb, &data, output);
break;
case htons(ETH_P_IPV6):
nf_br_ip6_fragment(state->net, state->sk, skb, &data, output);
break;
default:
WARN_ON_ONCE(1);
return NF_DROP;
}
return NF_STOLEN;
}
/* Actually only slow path refragmentation needs this. */
static int nf_ct_bridge_frag_restore(struct sk_buff *skb,
const struct nf_bridge_frag_data *data)
{
int err;
err = skb_cow_head(skb, ETH_HLEN);
if (err) {
kfree_skb(skb);
return -ENOMEM;
}
if (data->vlan_present)
__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);
else if (skb_vlan_tag_present(skb))
__vlan_hwaccel_clear_tag(skb);
skb_copy_to_linear_data_offset(skb, -ETH_HLEN, data->mac, ETH_HLEN);
skb_reset_mac_header(skb);
return 0;
}
static int nf_ct_bridge_refrag_post(struct net *net, struct sock *sk,
const struct nf_bridge_frag_data *data,
struct sk_buff *skb)
{
int err;
err = nf_ct_bridge_frag_restore(skb, data);
if (err < 0)
return err;
return br_dev_queue_push_xmit(net, sk, skb);
}
static unsigned int nf_ct_bridge_post(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
int ret;
ret = nf_confirm(priv, skb, state);
if (ret != NF_ACCEPT)
return ret;
return nf_ct_bridge_refrag(skb, state, nf_ct_bridge_refrag_post);
}
static struct nf_hook_ops nf_ct_bridge_hook_ops[] __read_mostly = {
{
.hook = nf_ct_bridge_pre,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = nf_ct_bridge_post,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
};
static struct nf_ct_bridge_info bridge_info = {
.ops = nf_ct_bridge_hook_ops,
.ops_size = ARRAY_SIZE(nf_ct_bridge_hook_ops),
.me = THIS_MODULE,
};
static int __init nf_conntrack_l3proto_bridge_init(void)
{
nf_ct_bridge_register(&bridge_info);
return 0;
}
static void __exit nf_conntrack_l3proto_bridge_fini(void)
{
nf_ct_bridge_unregister(&bridge_info);
}
module_init(nf_conntrack_l3proto_bridge_init);
module_exit(nf_conntrack_l3proto_bridge_fini);
MODULE_ALIAS("nf_conntrack-" __stringify(AF_BRIDGE));
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/nf_conntrack_bridge.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_among
*
* Authors:
* Grzegorz Borowiak <[email protected]>
*
* August, 2003
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_among.h>
static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
const char *mac, __be32 ip)
{
/* The lookup below relies on a few layout tricks; see
* include/linux/netfilter_bridge/ebt_among.h for how the wormhash
* structures are built.
*/
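/* Entries are bucketed by the last octet of the MAC address:
* wh->table[key]..wh->table[key + 1] delimit the bucket inside
* wh->pool[]. The MAC is copied at offset 2 of cmp[] so it can be
* compared as two 32-bit words; a tuple with ip == 0 matches any IP.
*/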
const struct ebt_mac_wormhash_tuple *p;
int start, limit, i;
uint32_t cmp[2] = { 0, 0 };
int key = ((const unsigned char *)mac)[5];
ether_addr_copy(((char *) cmp) + 2, mac);
start = wh->table[key];
limit = wh->table[key + 1];
if (ip) {
for (i = start; i < limit; i++) {
p = &wh->pool[i];
if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
if (p->ip == 0 || p->ip == ip)
return true;
}
} else {
for (i = start; i < limit; i++) {
p = &wh->pool[i];
if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
if (p->ip == 0)
return true;
}
}
return false;
}
static int ebt_mac_wormhash_check_integrity(const struct ebt_mac_wormhash
*wh)
{
int i;
for (i = 0; i < 256; i++) {
if (wh->table[i] > wh->table[i + 1])
return -0x100 - i;
if (wh->table[i] < 0)
return -0x200 - i;
if (wh->table[i] > wh->poolsize)
return -0x300 - i;
}
if (wh->table[256] > wh->poolsize)
return -0xc00;
return 0;
}
static int get_ip_dst(const struct sk_buff *skb, __be32 *addr)
{
if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
const struct iphdr *ih;
struct iphdr _iph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL)
return -1;
*addr = ih->daddr;
} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
const struct arphdr *ah;
struct arphdr _arph;
const __be32 *bp;
__be32 buf;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL ||
ah->ar_pln != sizeof(__be32) ||
ah->ar_hln != ETH_ALEN)
return -1;
bp = skb_header_pointer(skb, sizeof(struct arphdr) +
2 * ETH_ALEN + sizeof(__be32),
sizeof(__be32), &buf);
if (bp == NULL)
return -1;
*addr = *bp;
}
return 0;
}
static int get_ip_src(const struct sk_buff *skb, __be32 *addr)
{
if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
const struct iphdr *ih;
struct iphdr _iph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL)
return -1;
*addr = ih->saddr;
} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
const struct arphdr *ah;
struct arphdr _arph;
const __be32 *bp;
__be32 buf;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL ||
ah->ar_pln != sizeof(__be32) ||
ah->ar_hln != ETH_ALEN)
return -1;
bp = skb_header_pointer(skb, sizeof(struct arphdr) +
ETH_ALEN, sizeof(__be32), &buf);
if (bp == NULL)
return -1;
*addr = *bp;
}
return 0;
}
static bool
ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const char *dmac, *smac;
const struct ebt_mac_wormhash *wh_dst, *wh_src;
__be32 dip = 0, sip = 0;
wh_dst = ebt_among_wh_dst(info);
wh_src = ebt_among_wh_src(info);
if (wh_src) {
smac = eth_hdr(skb)->h_source;
if (get_ip_src(skb, &sip))
return false;
if (!(info->bitmask & EBT_AMONG_SRC_NEG)) {
/* we match only if it contains */
if (!ebt_mac_wormhash_contains(wh_src, smac, sip))
return false;
} else {
/* we match only if it DOES NOT contain */
if (ebt_mac_wormhash_contains(wh_src, smac, sip))
return false;
}
}
if (wh_dst) {
dmac = eth_hdr(skb)->h_dest;
if (get_ip_dst(skb, &dip))
return false;
if (!(info->bitmask & EBT_AMONG_DST_NEG)) {
/* we match only if it contains */
if (!ebt_mac_wormhash_contains(wh_dst, dmac, dip))
return false;
} else {
/* we match only if it DOES NOT contain */
if (ebt_mac_wormhash_contains(wh_dst, dmac, dip))
return false;
}
}
return true;
}
static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
{
return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
}
static bool wormhash_offset_invalid(int off, unsigned int len)
{
if (off == 0) /* not present */
return false;
if (off < (int)sizeof(struct ebt_among_info) ||
off % __alignof__(struct ebt_mac_wormhash))
return true;
off += sizeof(struct ebt_mac_wormhash);
return off > len;
}
static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
{
if (a == 0)
a = sizeof(struct ebt_among_info);
return ebt_mac_wormhash_size(wh) + a == b;
}
static int ebt_among_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const struct ebt_entry_match *em =
container_of(par->matchinfo, const struct ebt_entry_match, data);
unsigned int expected_length = sizeof(struct ebt_among_info);
const struct ebt_mac_wormhash *wh_dst, *wh_src;
int err;
if (expected_length > em->match_size)
return -EINVAL;
if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
return -EINVAL;
wh_dst = ebt_among_wh_dst(info);
if (poolsize_invalid(wh_dst))
return -EINVAL;
expected_length += ebt_mac_wormhash_size(wh_dst);
if (expected_length > em->match_size)
return -EINVAL;
wh_src = ebt_among_wh_src(info);
if (poolsize_invalid(wh_src))
return -EINVAL;
if (info->wh_src_ofs < info->wh_dst_ofs) {
if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
return -EINVAL;
} else {
if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
return -EINVAL;
}
expected_length += ebt_mac_wormhash_size(wh_src);
if (em->match_size != EBT_ALIGN(expected_length)) {
pr_err_ratelimited("wrong size: %d against expected %d, rounded to %zd\n",
em->match_size, expected_length,
EBT_ALIGN(expected_length));
return -EINVAL;
}
if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
pr_err_ratelimited("dst integrity fail: %x\n", -err);
return -EINVAL;
}
if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
pr_err_ratelimited("src integrity fail: %x\n", -err);
return -EINVAL;
}
return 0;
}
static struct xt_match ebt_among_mt_reg __read_mostly = {
.name = "among",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_among_mt,
.checkentry = ebt_among_mt_check,
.matchsize = -1, /* special case */
.me = THIS_MODULE,
};
static int __init ebt_among_init(void)
{
return xt_register_match(&ebt_among_mt_reg);
}
static void __exit ebt_among_fini(void)
{
xt_unregister_match(&ebt_among_mt_reg);
}
module_init(ebt_among_init);
module_exit(ebt_among_fini);
MODULE_DESCRIPTION("Ebtables: Combined MAC/IP address list matching");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_among.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_redirect
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* April, 2002
*
*/
#include <linux/module.h>
#include <net/sock.h>
#include "../br_private.h"
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_redirect.h>
static unsigned int
ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
if (skb_ensure_writable(skb, 0))
return EBT_DROP;
if (xt_hooknum(par) != NF_BR_BROUTING)
/* rcu_read_lock()ed by nf_hook_thresh */
ether_addr_copy(eth_hdr(skb)->h_dest,
br_port_get_rcu(xt_in(par))->br->dev->dev_addr);
else
ether_addr_copy(eth_hdr(skb)->h_dest, xt_in(par)->dev_addr);
skb->pkt_type = PACKET_HOST;
return info->target;
}
static int ebt_redirect_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
if ((strcmp(par->table, "nat") != 0 ||
hook_mask & ~(1 << NF_BR_PRE_ROUTING)) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
return -EINVAL;
if (ebt_invalid_target(info->target))
return -EINVAL;
return 0;
}
static struct xt_target ebt_redirect_tg_reg __read_mostly = {
.name = "redirect",
.revision = 0,
.family = NFPROTO_BRIDGE,
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_BROUTING),
.target = ebt_redirect_tg,
.checkentry = ebt_redirect_tg_check,
.targetsize = sizeof(struct ebt_redirect_info),
.me = THIS_MODULE,
};
static int __init ebt_redirect_init(void)
{
return xt_register_target(&ebt_redirect_tg_reg);
}
static void __exit ebt_redirect_fini(void)
{
xt_unregister_target(&ebt_redirect_tg_reg);
}
module_init(ebt_redirect_init);
module_exit(ebt_redirect_fini);
MODULE_DESCRIPTION("Ebtables: Packet redirection to localhost");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_redirect.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_limit
*
* Authors:
* Tom Marshall <[email protected]>
*
* Mostly copied from netfilter's ipt_limit.c; see that file for
* more explanation.
*
* September, 2003
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_limit.h>
static DEFINE_SPINLOCK(limit_lock);
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
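/* CREDITS_PER_JIFFY is the largest power of two not exceeding MAX_CPJ,
* so accumulating credits for a full day cannot overflow the 32-bit
* credit counter.
*/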
static bool
ebt_limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ebt_limit_info *info = (void *)par->matchinfo;
unsigned long now = jiffies;
spin_lock_bh(&limit_lock);
info->credit += (now - xchg(&info->prev, now)) * CREDITS_PER_JIFFY;
if (info->credit > info->credit_cap)
info->credit = info->credit_cap;
if (info->credit >= info->cost) {
/* We're not limited. */
info->credit -= info->cost;
spin_unlock_bh(&limit_lock);
return true;
}
spin_unlock_bh(&limit_lock);
return false;
}
/* Precision saver. */
static u_int32_t
user2credits(u_int32_t user)
{
/* If multiplying would overflow... */
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
/* Divide first. */
return (user / EBT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE;
}
static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
{
struct ebt_limit_info *info = par->matchinfo;
/* Check for overflow. */
if (info->burst == 0 ||
user2credits(info->avg * info->burst) < user2credits(info->avg)) {
pr_info_ratelimited("overflow, try lower: %u/%u\n",
info->avg, info->burst);
return -EINVAL;
}
/* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */
info->prev = jiffies;
info->credit = user2credits(info->avg * info->burst);
info->credit_cap = user2credits(info->avg * info->burst);
info->cost = user2credits(info->avg);
return 0;
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/*
* no conversion function needed --
* only avg/burst have meaningful values in userspace.
*/
struct ebt_compat_limit_info {
compat_uint_t avg, burst;
compat_ulong_t prev;
compat_uint_t credit, credit_cap, cost;
};
#endif
static struct xt_match ebt_limit_mt_reg __read_mostly = {
.name = "limit",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_limit_mt,
.checkentry = ebt_limit_mt_check,
.matchsize = sizeof(struct ebt_limit_info),
.usersize = offsetof(struct ebt_limit_info, prev),
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
.compatsize = sizeof(struct ebt_compat_limit_info),
#endif
.me = THIS_MODULE,
};
static int __init ebt_limit_init(void)
{
return xt_register_match(&ebt_limit_mt_reg);
}
static void __exit ebt_limit_fini(void)
{
xt_unregister_match(&ebt_limit_mt_reg);
}
module_init(ebt_limit_init);
module_exit(ebt_limit_fini);
MODULE_DESCRIPTION("Ebtables: Rate-limit match");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_limit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_pkttype
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* April, 2003
*
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_pkttype.h>
static bool
ebt_pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_pkttype_info *info = par->matchinfo;
return (skb->pkt_type == info->pkt_type) ^ info->invert;
}
static int ebt_pkttype_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_pkttype_info *info = par->matchinfo;
if (info->invert != 0 && info->invert != 1)
return -EINVAL;
/* Allow any pkt_type value */
return 0;
}
static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
.name = "pkttype",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_pkttype_mt,
.checkentry = ebt_pkttype_mt_check,
.matchsize = sizeof(struct ebt_pkttype_info),
.me = THIS_MODULE,
};
static int __init ebt_pkttype_init(void)
{
return xt_register_match(&ebt_pkttype_mt_reg);
}
static void __exit ebt_pkttype_fini(void)
{
xt_unregister_match(&ebt_pkttype_mt_reg);
}
module_init(ebt_pkttype_init);
module_exit(ebt_pkttype_fini);
MODULE_DESCRIPTION("Ebtables: Link layer packet type match");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_pkttype.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_ip6
*
* Authors:
* Manohar Castelino <[email protected]>
* Kuo-Lang Tseng <[email protected]>
* Jan Engelhardt <[email protected]>
*
* Summary:
* This is just a modification of the IPv4 code written by
* Bart De Schuymer <[email protected]>
* with the changes required to support IPv6
*
* Jan, 2008
*/
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/dsfield.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip6.h>
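/* Overlay for the first four bytes of the transport header: either the
* source and destination ports (TCP, UDP, UDPLITE, SCTP, DCCP) or the
* ICMPv6 type and code.
*/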
union pkthdr {
struct {
__be16 src;
__be16 dst;
} tcpudphdr;
struct {
u8 type;
u8 code;
} icmphdr;
};
static bool
ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_ip6_info *info = par->matchinfo;
const struct ipv6hdr *ih6;
struct ipv6hdr _ip6h;
const union pkthdr *pptr;
union pkthdr _pkthdr;
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
if (ih6 == NULL)
return false;
if ((info->bitmask & EBT_IP6_TCLASS) &&
NF_INVF(info, EBT_IP6_TCLASS,
info->tclass != ipv6_get_dsfield(ih6)))
return false;
if (((info->bitmask & EBT_IP6_SOURCE) &&
NF_INVF(info, EBT_IP6_SOURCE,
ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
&info->saddr))) ||
((info->bitmask & EBT_IP6_DEST) &&
NF_INVF(info, EBT_IP6_DEST,
ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
&info->daddr))))
return false;
if (info->bitmask & EBT_IP6_PROTO) {
uint8_t nexthdr = ih6->nexthdr;
__be16 frag_off;
int offset_ph;
offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
if (offset_ph == -1)
return false;
if (NF_INVF(info, EBT_IP6_PROTO, info->protocol != nexthdr))
return false;
if (!(info->bitmask & (EBT_IP6_DPORT |
EBT_IP6_SPORT | EBT_IP6_ICMP6)))
return true;
/* The minimum ICMPv6 header size is 4, so sizeof(_pkthdr) is ok. */
pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
&_pkthdr);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP6_DPORT) {
u16 dst = ntohs(pptr->tcpudphdr.dst);
if (NF_INVF(info, EBT_IP6_DPORT,
dst < info->dport[0] ||
dst > info->dport[1]))
return false;
}
if (info->bitmask & EBT_IP6_SPORT) {
u16 src = ntohs(pptr->tcpudphdr.src);
if (NF_INVF(info, EBT_IP6_SPORT,
src < info->sport[0] ||
src > info->sport[1]))
return false;
}
if ((info->bitmask & EBT_IP6_ICMP6) &&
NF_INVF(info, EBT_IP6_ICMP6,
pptr->icmphdr.type < info->icmpv6_type[0] ||
pptr->icmphdr.type > info->icmpv6_type[1] ||
pptr->icmphdr.code < info->icmpv6_code[0] ||
pptr->icmphdr.code > info->icmpv6_code[1]))
return false;
}
return true;
}
static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_entry *e = par->entryinfo;
struct ebt_ip6_info *info = par->matchinfo;
if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
return -EINVAL;
if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK)
return -EINVAL;
if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
if (info->invflags & EBT_IP6_PROTO)
return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
return -EINVAL;
}
if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
return -EINVAL;
if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
if (info->bitmask & EBT_IP6_ICMP6) {
if ((info->invflags & EBT_IP6_PROTO) ||
info->protocol != IPPROTO_ICMPV6)
return -EINVAL;
if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
info->icmpv6_code[0] > info->icmpv6_code[1])
return -EINVAL;
}
return 0;
}
static struct xt_match ebt_ip6_mt_reg __read_mostly = {
.name = "ip6",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_ip6_mt,
.checkentry = ebt_ip6_mt_check,
.matchsize = sizeof(struct ebt_ip6_info),
.me = THIS_MODULE,
};
static int __init ebt_ip6_init(void)
{
return xt_register_match(&ebt_ip6_mt_reg);
}
static void __exit ebt_ip6_fini(void)
{
xt_unregister_match(&ebt_ip6_mt_reg);
}
module_init(ebt_ip6_init);
module_exit(ebt_ip6_fini);
MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match");
MODULE_AUTHOR("Kuo-Lang Tseng <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_ip6.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_meta.h>
#include <linux/if_bridge.h>
#include <uapi/linux/netfilter_bridge.h> /* NF_BR_PRE_ROUTING */
#include "../br_private.h"
static const struct net_device *
nft_meta_get_bridge(const struct net_device *dev)
{
if (dev && netif_is_bridge_port(dev))
return netdev_master_upper_dev_get_rcu((struct net_device *)dev);
return NULL;
}
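/* Bridge-specific meta keys: resolve the bridge master of the in/out
* port for IIFNAME/OIFNAME, or fetch the ingress port's PVID and the
* bridge's VLAN protocol; anything else falls through to the generic
* meta handler.
*/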
static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
u32 *dest = &regs->data[priv->dreg];
const struct net_device *br_dev;
switch (priv->key) {
case NFT_META_BRI_IIFNAME:
br_dev = nft_meta_get_bridge(in);
break;
case NFT_META_BRI_OIFNAME:
br_dev = nft_meta_get_bridge(out);
break;
case NFT_META_BRI_IIFPVID: {
u16 p_pvid;
br_dev = nft_meta_get_bridge(in);
if (!br_dev || !br_vlan_enabled(br_dev))
goto err;
br_vlan_get_pvid_rcu(in, &p_pvid);
nft_reg_store16(dest, p_pvid);
return;
}
case NFT_META_BRI_IIFVPROTO: {
u16 p_proto;
br_dev = nft_meta_get_bridge(in);
if (!br_dev || !br_vlan_enabled(br_dev))
goto err;
br_vlan_get_proto(br_dev, &p_proto);
nft_reg_store_be16(dest, htons(p_proto));
return;
}
default:
return nft_meta_get_eval(expr, regs, pkt);
}
strncpy((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ);
return;
err:
regs->verdict.code = NFT_BREAK;
}
static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_BRI_IIFNAME:
case NFT_META_BRI_OIFNAME:
len = IFNAMSIZ;
break;
case NFT_META_BRI_IIFPVID:
case NFT_META_BRI_IIFVPROTO:
len = sizeof(u16);
break;
default:
return nft_meta_get_init(ctx, expr, tb);
}
priv->len = len;
return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
NULL, NFT_DATA_VALUE, len);
}
static struct nft_expr_type nft_meta_bridge_type;
static const struct nft_expr_ops nft_meta_bridge_get_ops = {
.type = &nft_meta_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_bridge_get_eval,
.init = nft_meta_bridge_get_init,
.dump = nft_meta_get_dump,
.reduce = nft_meta_get_reduce,
};
static void nft_meta_bridge_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *meta = nft_expr_priv(expr);
u32 *sreg = &regs->data[meta->sreg];
struct sk_buff *skb = pkt->skb;
u8 value8;
switch (meta->key) {
case NFT_META_BRI_BROUTE:
value8 = nft_reg_load8(sreg);
BR_INPUT_SKB_CB(skb)->br_netfilter_broute = !!value8;
break;
default:
nft_meta_set_eval(expr, regs, pkt);
}
}
static int nft_meta_bridge_set_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
int err;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_BRI_BROUTE:
len = sizeof(u8);
break;
default:
return nft_meta_set_init(ctx, expr, tb);
}
priv->len = len;
err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
if (err < 0)
return err;
return 0;
}
static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
int i;
for (i = 0; i < NFT_REG32_NUM; i++) {
if (!track->regs[i].selector)
continue;
if (track->regs[i].selector->ops != &nft_meta_bridge_get_ops)
continue;
__nft_reg_track_cancel(track, i);
}
return false;
}
static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int hooks;
switch (priv->key) {
case NFT_META_BRI_BROUTE:
hooks = 1 << NF_BR_PRE_ROUTING;
break;
default:
return nft_meta_set_validate(ctx, expr, data);
}
return nft_chain_validate_hooks(ctx->chain, hooks);
}
static const struct nft_expr_ops nft_meta_bridge_set_ops = {
.type = &nft_meta_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_bridge_set_eval,
.init = nft_meta_bridge_set_init,
.destroy = nft_meta_set_destroy,
.dump = nft_meta_set_dump,
.reduce = nft_meta_bridge_set_reduce,
.validate = nft_meta_bridge_set_validate,
};
static const struct nft_expr_ops *
nft_meta_bridge_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_META_KEY] == NULL)
return ERR_PTR(-EINVAL);
if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
return ERR_PTR(-EINVAL);
if (tb[NFTA_META_DREG])
return &nft_meta_bridge_get_ops;
if (tb[NFTA_META_SREG])
return &nft_meta_bridge_set_ops;
return ERR_PTR(-EINVAL);
}
static struct nft_expr_type nft_meta_bridge_type __read_mostly = {
.family = NFPROTO_BRIDGE,
.name = "meta",
.select_ops = nft_meta_bridge_select_ops,
.policy = nft_meta_policy,
.maxattr = NFTA_META_MAX,
.owner = THIS_MODULE,
};
static int __init nft_meta_bridge_module_init(void)
{
return nft_register_expr(&nft_meta_bridge_type);
}
static void __exit nft_meta_bridge_module_exit(void)
{
nft_unregister_expr(&nft_meta_bridge_type);
}
module_init(nft_meta_bridge_module_init);
module_exit(nft_meta_bridge_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("wenxu <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
MODULE_DESCRIPTION("Support for bridge dedicated meta key");
| linux-master | net/bridge/netfilter/nft_meta_bridge.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_snat
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* June, 2002
*
*/
#include <linux/module.h>
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nat.h>
static unsigned int
ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
if (skb_ensure_writable(skb, 0))
return EBT_DROP;
ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
if (!(info->target & NAT_ARP_BIT) &&
eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
const struct arphdr *ap;
struct arphdr _ah;
ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
if (ap == NULL)
return EBT_DROP;
if (ap->ar_hln != ETH_ALEN)
goto out;
if (skb_store_bits(skb, sizeof(_ah), info->mac, ETH_ALEN))
return EBT_DROP;
}
out:
return info->target | ~EBT_VERDICT_BITS;
}
static int ebt_snat_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
int tmp;
tmp = info->target | ~EBT_VERDICT_BITS;
if (BASE_CHAIN && tmp == EBT_RETURN)
return -EINVAL;
if (ebt_invalid_target(tmp))
return -EINVAL;
tmp = info->target | EBT_VERDICT_BITS;
if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT)
return -EINVAL;
return 0;
}
static struct xt_target ebt_snat_tg_reg __read_mostly = {
.name = "snat",
.revision = 0,
.family = NFPROTO_BRIDGE,
.table = "nat",
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING),
.target = ebt_snat_tg,
.checkentry = ebt_snat_tg_check,
.targetsize = sizeof(struct ebt_nat_info),
.me = THIS_MODULE,
};
static int __init ebt_snat_init(void)
{
return xt_register_target(&ebt_snat_tg_reg);
}
static void __exit ebt_snat_fini(void)
{
xt_unregister_target(&ebt_snat_tg_reg);
}
module_init(ebt_snat_init);
module_exit(ebt_snat_fini);
MODULE_DESCRIPTION("Ebtables: Source MAC address translation");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_snat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_arp
*
* Authors:
* Bart De Schuymer <[email protected]>
* Tim Gardner <[email protected]>
*
* April, 2002
*
*/
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_arp.h>
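/* The ARP payload that follows struct arphdr is laid out as:
 * sender hw addr (ar_hln) | sender proto addr (ar_pln) |
 * target hw addr (ar_hln) | target proto addr (ar_pln).
 * The offsets passed to skb_header_pointer() below index into this layout.
 */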
static bool
ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_arp_info *info = par->matchinfo;
const struct arphdr *ah;
struct arphdr _arph;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL)
return false;
if ((info->bitmask & EBT_ARP_OPCODE) &&
NF_INVF(info, EBT_ARP_OPCODE, info->opcode != ah->ar_op))
return false;
if ((info->bitmask & EBT_ARP_HTYPE) &&
NF_INVF(info, EBT_ARP_HTYPE, info->htype != ah->ar_hrd))
return false;
if ((info->bitmask & EBT_ARP_PTYPE) &&
NF_INVF(info, EBT_ARP_PTYPE, info->ptype != ah->ar_pro))
return false;
if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) {
const __be32 *sap, *dap;
__be32 saddr, daddr;
if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP))
return false;
sap = skb_header_pointer(skb, sizeof(struct arphdr) +
ah->ar_hln, sizeof(saddr),
&saddr);
if (sap == NULL)
return false;
dap = skb_header_pointer(skb, sizeof(struct arphdr) +
2*ah->ar_hln+sizeof(saddr),
sizeof(daddr), &daddr);
if (dap == NULL)
return false;
if ((info->bitmask & EBT_ARP_SRC_IP) &&
NF_INVF(info, EBT_ARP_SRC_IP,
info->saddr != (*sap & info->smsk)))
return false;
if ((info->bitmask & EBT_ARP_DST_IP) &&
NF_INVF(info, EBT_ARP_DST_IP,
info->daddr != (*dap & info->dmsk)))
return false;
if ((info->bitmask & EBT_ARP_GRAT) &&
NF_INVF(info, EBT_ARP_GRAT, *dap != *sap))
return false;
}
if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)) {
const unsigned char *mp;
unsigned char _mac[ETH_ALEN];
if (ah->ar_hln != ETH_ALEN || ah->ar_hrd != htons(ARPHRD_ETHER))
return false;
if (info->bitmask & EBT_ARP_SRC_MAC) {
mp = skb_header_pointer(skb, sizeof(struct arphdr),
sizeof(_mac), &_mac);
if (mp == NULL)
return false;
if (NF_INVF(info, EBT_ARP_SRC_MAC,
!ether_addr_equal_masked(mp, info->smaddr,
info->smmsk)))
return false;
}
if (info->bitmask & EBT_ARP_DST_MAC) {
mp = skb_header_pointer(skb, sizeof(struct arphdr) +
ah->ar_hln + ah->ar_pln,
sizeof(_mac), &_mac);
if (mp == NULL)
return false;
if (NF_INVF(info, EBT_ARP_DST_MAC,
!ether_addr_equal_masked(mp, info->dmaddr,
info->dmmsk)))
return false;
}
}
return true;
}
static int ebt_arp_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_arp_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
if ((e->ethproto != htons(ETH_P_ARP) &&
e->ethproto != htons(ETH_P_RARP)) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK)
return -EINVAL;
return 0;
}
static struct xt_match ebt_arp_mt_reg __read_mostly = {
.name = "arp",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_arp_mt,
.checkentry = ebt_arp_mt_check,
.matchsize = sizeof(struct ebt_arp_info),
.me = THIS_MODULE,
};
static int __init ebt_arp_init(void)
{
return xt_register_match(&ebt_arp_mt_reg);
}
static void __exit ebt_arp_fini(void)
{
xt_unregister_match(&ebt_arp_mt_reg);
}
module_init(ebt_arp_init);
module_exit(ebt_arp_fini);
MODULE_DESCRIPTION("Ebtables: ARP protocol packet match");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_arp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebtable_broute
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* April, 2002
*
* This table lets you choose between routing and bridging for frames
* entering on a bridge enslaved nic. This table is traversed before any
* other ebtables table. See net/bridge/br_input.c.
*/
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/module.h>
#include <linux/if_bridge.h>
#include "../br_private.h"
/* EBT_ACCEPT means the frame will be bridged
* EBT_DROP means the frame will be routed
*/
static struct ebt_entries initial_chain = {
.name = "BROUTING",
.policy = EBT_ACCEPT,
};
static struct ebt_replace_kernel initial_table = {
.name = "broute",
.valid_hooks = 1 << NF_BR_BROUTING,
.entries_size = sizeof(struct ebt_entries),
.hook_entry = {
[NF_BR_BROUTING] = &initial_chain,
},
.entries = (char *)&initial_chain,
};
static const struct ebt_table broute_table = {
.name = "broute",
.table = &initial_table,
.valid_hooks = 1 << NF_BR_BROUTING,
.me = THIS_MODULE,
};
static unsigned int ebt_broute(void *priv, struct sk_buff *skb,
const struct nf_hook_state *s)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
struct nf_hook_state state;
unsigned char *dest;
int ret;
if (!p || p->state != BR_STATE_FORWARDING)
return NF_ACCEPT;
nf_hook_state_init(&state, NF_BR_BROUTING,
NFPROTO_BRIDGE, s->in, NULL, NULL,
s->net, NULL);
ret = ebt_do_table(priv, skb, &state);
if (ret != NF_DROP)
return ret;
/* DROP in ebtables -t broute means that the
* skb should be routed, not bridged.
* This is awkward, but can't be changed for compatibility
* reasons.
*
* We map DROP to ACCEPT and set the ->br_netfilter_broute flag.
*/
BR_INPUT_SKB_CB(skb)->br_netfilter_broute = 1;
/* undo PACKET_HOST mangling done in br_input in case the dst
* address matches the logical bridge but not the port.
*/
dest = eth_hdr(skb)->h_dest;
if (skb->pkt_type == PACKET_HOST &&
!ether_addr_equal(skb->dev->dev_addr, dest) &&
ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_OTHERHOST;
return NF_ACCEPT;
}
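/* The BROUTING hook is emulated: the ops below attach to the bridge
 * prerouting hook at the highest priority, and ebt_broute() re-initialises
 * the hook state to NF_BR_BROUTING before traversing the table.
 */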
static const struct nf_hook_ops ebt_ops_broute = {
.hook = ebt_broute,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_FIRST,
};
static int broute_table_init(struct net *net)
{
return ebt_register_table(net, &broute_table, &ebt_ops_broute);
}
static void __net_exit broute_net_pre_exit(struct net *net)
{
ebt_unregister_table_pre_exit(net, "broute");
}
static void __net_exit broute_net_exit(struct net *net)
{
ebt_unregister_table(net, "broute");
}
static struct pernet_operations broute_net_ops = {
.exit = broute_net_exit,
.pre_exit = broute_net_pre_exit,
};
static int __init ebtable_broute_init(void)
{
int ret = ebt_register_template(&broute_table, broute_table_init);
if (ret)
return ret;
ret = register_pernet_subsys(&broute_net_ops);
if (ret) {
ebt_unregister_template(&broute_table);
return ret;
}
return 0;
}
static void __exit ebtable_broute_fini(void)
{
unregister_pernet_subsys(&broute_net_ops);
ebt_unregister_template(&broute_table);
}
module_init(ebtable_broute_init);
module_exit(ebtable_broute_fini);
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebtable_broute.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_mark_m
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* July, 2002
*
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_mark_m.h>
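/* With EBT_MARK_OR the match succeeds if any bit selected by info->mask is
 * set in skb->mark; otherwise the masked mark must equal info->mark exactly
 * (e.g. mask 0xff00, mark 0x1200 matches marks of the form 0x12xx).
 * info->invert flips the result.
 */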
static bool
ebt_mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_mark_m_info *info = par->matchinfo;
if (info->bitmask & EBT_MARK_OR)
return !!(skb->mark & info->mask) ^ info->invert;
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
static int ebt_mark_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_mark_m_info *info = par->matchinfo;
if (info->bitmask & ~EBT_MARK_MASK)
return -EINVAL;
if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND))
return -EINVAL;
if (!info->bitmask)
return -EINVAL;
return 0;
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct compat_ebt_mark_m_info {
compat_ulong_t mark, mask;
uint8_t invert, bitmask;
};
static void mark_mt_compat_from_user(void *dst, const void *src)
{
const struct compat_ebt_mark_m_info *user = src;
struct ebt_mark_m_info *kern = dst;
kern->mark = user->mark;
kern->mask = user->mask;
kern->invert = user->invert;
kern->bitmask = user->bitmask;
}
static int mark_mt_compat_to_user(void __user *dst, const void *src)
{
struct compat_ebt_mark_m_info __user *user = dst;
const struct ebt_mark_m_info *kern = src;
if (put_user(kern->mark, &user->mark) ||
put_user(kern->mask, &user->mask) ||
put_user(kern->invert, &user->invert) ||
put_user(kern->bitmask, &user->bitmask))
return -EFAULT;
return 0;
}
#endif
static struct xt_match ebt_mark_mt_reg __read_mostly = {
.name = "mark_m",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_mark_mt,
.checkentry = ebt_mark_mt_check,
.matchsize = sizeof(struct ebt_mark_m_info),
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
.compatsize = sizeof(struct compat_ebt_mark_m_info),
.compat_from_user = mark_mt_compat_from_user,
.compat_to_user = mark_mt_compat_to_user,
#endif
.me = THIS_MODULE,
};
static int __init ebt_mark_m_init(void)
{
return xt_register_match(&ebt_mark_mt_reg);
}
static void __exit ebt_mark_m_fini(void)
{
xt_unregister_match(&ebt_mark_mt_reg);
}
module_init(ebt_mark_m_init);
module_exit(ebt_mark_m_fini);
MODULE_DESCRIPTION("Ebtables: Packet mark match");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_mark_m.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Description: EBTables 802.1Q match extension kernelspace module.
* Authors: Nick Fedchik <[email protected]>
* Bart De Schuymer <[email protected]>
*/
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_vlan.h>
#define MODULE_VERS "0.6"
MODULE_AUTHOR("Nick Fedchik <[email protected]>");
MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match");
MODULE_LICENSE("GPL");
#define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_
#define EXIT_ON_MISMATCH(_MATCH_,_MASK_) {if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return false; }
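/* EXIT_ON_MISMATCH(field, FLAG) fails the match (returns false) unless the
 * parsed value equals info->field, with the comparison inverted when the
 * corresponding bit is set in info->invflags.
 */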
static bool
ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_vlan_info *info = par->matchinfo;
unsigned short TCI; /* Whole TCI, given from parsed frame */
unsigned short id; /* VLAN ID, given from frame TCI */
unsigned char prio; /* user_priority, given from frame TCI */
/* VLAN encapsulated Type/Length field, given from orig frame */
__be16 encap;
if (skb_vlan_tag_present(skb)) {
TCI = skb_vlan_tag_get(skb);
encap = skb->protocol;
} else {
const struct vlan_hdr *fp;
struct vlan_hdr _frame;
fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame);
if (fp == NULL)
return false;
TCI = ntohs(fp->h_vlan_TCI);
encap = fp->h_vlan_encapsulated_proto;
}
/* Tag Control Information (TCI) consists of the following elements:
* - User_priority. The user_priority field is three bits in length,
* interpreted as a binary number.
* - Canonical Format Indicator (CFI). The Canonical Format Indicator
* (CFI) is a single bit flag value. Currently ignored.
* - VLAN Identifier (VID). The VID is encoded as
* an unsigned binary number.
*/
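/* Example: TCI 0xa00a -> user_priority 5, CFI 0, VID 10. */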
id = TCI & VLAN_VID_MASK;
prio = (TCI >> 13) & 0x7;
/* Checking VLAN Identifier (VID) */
if (GET_BITMASK(EBT_VLAN_ID))
EXIT_ON_MISMATCH(id, EBT_VLAN_ID);
/* Checking user_priority */
if (GET_BITMASK(EBT_VLAN_PRIO))
EXIT_ON_MISMATCH(prio, EBT_VLAN_PRIO);
/* Checking Encapsulated Proto (Length/Type) field */
if (GET_BITMASK(EBT_VLAN_ENCAP))
EXIT_ON_MISMATCH(encap, EBT_VLAN_ENCAP);
return true;
}
static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
{
struct ebt_vlan_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
/* Is the rule restricted to 802.1Q frames? */
if (e->ethproto != htons(ETH_P_8021Q)) {
pr_debug("passed entry proto %2.4X is not 802.1Q (8100)\n",
ntohs(e->ethproto));
return -EINVAL;
}
/* Check the bitmask range:
 * the condition is true if even one bit falls outside the mask
 */
if (info->bitmask & ~EBT_VLAN_MASK) {
pr_debug("bitmask %2X is out of mask (%2X)\n",
info->bitmask, EBT_VLAN_MASK);
return -EINVAL;
}
/* Check for inversion flags range */
if (info->invflags & ~EBT_VLAN_MASK) {
pr_debug("inversion flags %2X is out of mask (%2X)\n",
info->invflags, EBT_VLAN_MASK);
return -EINVAL;
}
/* Reserved VLAN ID (VID) values
* -----------------------------
* 0 - The null VLAN ID.
* 1 - The default Port VID (PVID)
* 0x0FFF - Reserved for implementation use.
* if_vlan.h: VLAN_N_VID 4096.
*/
if (GET_BITMASK(EBT_VLAN_ID)) {
if (!!info->id) { /* if id!=0 => check vid range */
if (info->id > VLAN_N_VID) {
pr_debug("id %d is out of range (1-4096)\n",
info->id);
return -EINVAL;
}
/* Note: at this point the frame is a valid VLAN-tagged frame.
 * Any user_priority value is acceptable, but it should be
 * ignored according to the 802.1Q standard, so we just drop
 * the prio flag.
 */
info->bitmask &= ~EBT_VLAN_PRIO;
}
/* Else, id=0 (null VLAN ID) => user_priority range (any?) */
}
if (GET_BITMASK(EBT_VLAN_PRIO)) {
if ((unsigned char) info->prio > 7) {
pr_debug("prio %d is out of range (0-7)\n",
info->prio);
return -EINVAL;
}
}
/* Check the encapsulated proto (length/type) range - it can be
 * any value in the u_short range.
 * if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS
 */
if (GET_BITMASK(EBT_VLAN_ENCAP)) {
if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) {
pr_debug("encap frame length %d is less than "
"minimal\n", ntohs(info->encap));
return -EINVAL;
}
}
return 0;
}
static struct xt_match ebt_vlan_mt_reg __read_mostly = {
.name = "vlan",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_vlan_mt,
.checkentry = ebt_vlan_mt_check,
.matchsize = sizeof(struct ebt_vlan_info),
.me = THIS_MODULE,
};
static int __init ebt_vlan_init(void)
{
pr_debug("ebtables 802.1Q extension module v" MODULE_VERS "\n");
return xt_register_match(&ebt_vlan_mt_reg);
}
static void __exit ebt_vlan_fini(void)
{
xt_unregister_match(&ebt_vlan_mt_reg);
}
module_init(ebt_vlan_init);
module_exit(ebt_vlan_fini);
| linux-master | net/bridge/netfilter/ebt_vlan.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Pablo Neira Ayuso <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include "../br_private.h"
static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
struct sk_buff *nskb)
{
struct ethhdr *eth;
eth = skb_push(nskb, ETH_HLEN);
skb_reset_mac_header(nskb);
ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
eth->h_proto = eth_hdr(oldskb)->h_proto;
skb_pull(nskb, ETH_HLEN);
if (skb_vlan_tag_present(oldskb)) {
u16 vid = skb_vlan_tag_get(oldskb);
__vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
}
}
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
* or the bridge port (NF_BRIDGE PREROUTING).
*/
static void nft_reject_br_send_v4_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, NULL, hook);
if (!nskb)
return;
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
static void nft_reject_br_send_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v4_unreach(net, oldskb, NULL, hook, code);
if (!nskb)
return;
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, NULL, hook);
if (!nskb)
return;
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
static void nft_reject_br_send_v6_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v6_unreach(net, oldskb, NULL, hook, code);
if (!nskb)
return;
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
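/* Never answer link-layer broadcast or multicast frames with a reject;
 * only unicast IPv4/IPv6 gets a TCP RST or ICMP(v6) error, and the
 * original packet is always dropped.
 */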
static void nft_reject_bridge_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
if (is_broadcast_ether_addr(dest) ||
is_multicast_ether_addr(dest))
goto out;
switch (eth_hdr(pkt->skb)->h_proto) {
case htons(ETH_P_IP):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
nft_reject_icmp_code(priv->icmp_code));
break;
}
break;
case htons(ETH_P_IPV6):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
nft_reject_icmpv6_code(priv->icmp_code));
break;
}
break;
default:
/* No explicit way to reject this protocol, drop it. */
break;
}
out:
regs->verdict.code = NF_DROP;
}
static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_IN));
}
static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_bridge_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_bridge_validate,
.reduce = NFT_REDUCE_READONLY,
};
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
.family = NFPROTO_BRIDGE,
.name = "reject",
.ops = &nft_reject_bridge_ops,
.policy = nft_reject_policy,
.maxattr = NFTA_REJECT_MAX,
.owner = THIS_MODULE,
};
static int __init nft_reject_bridge_module_init(void)
{
return nft_register_expr(&nft_reject_bridge_type);
}
static void __exit nft_reject_bridge_module_exit(void)
{
nft_unregister_expr(&nft_reject_bridge_type);
}
module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
MODULE_DESCRIPTION("Reject packets from bridge via nftables");
| linux-master | net/bridge/netfilter/nft_reject_bridge.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_log
*
* Authors:
* Bart De Schuymer <[email protected]>
* Harald Welte <[email protected]>
*
* April, 2002
*
*/
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/spinlock.h>
#include <net/netfilter/nf_log.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/in6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_log.h>
#include <linux/netfilter.h>
static DEFINE_SPINLOCK(ebt_log_lock);
static int ebt_log_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_log_info *info = par->targinfo;
if (info->bitmask & ~EBT_LOG_MASK)
return -EINVAL;
if (info->loglevel >= 8)
return -EINVAL;
info->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
return 0;
}
struct tcpudphdr {
__be16 src;
__be16 dst;
};
struct arppayload {
unsigned char mac_src[ETH_ALEN];
unsigned char ip_src[4];
unsigned char mac_dst[ETH_ALEN];
unsigned char ip_dst[4];
};
static void
print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
{
if (protocol == IPPROTO_TCP ||
protocol == IPPROTO_UDP ||
protocol == IPPROTO_UDPLITE ||
protocol == IPPROTO_SCTP ||
protocol == IPPROTO_DCCP) {
const struct tcpudphdr *pptr;
struct tcpudphdr _ports;
pptr = skb_header_pointer(skb, offset,
sizeof(_ports), &_ports);
if (pptr == NULL) {
pr_cont(" INCOMPLETE TCP/UDP header");
return;
}
pr_cont(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
}
}
static void
ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const struct nf_loginfo *loginfo,
const char *prefix)
{
unsigned int bitmask;
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
return;
spin_lock_bh(&ebt_log_lock);
printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
'0' + loginfo->u.log.level, prefix,
in ? in->name : "", out ? out->name : "",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
ntohs(eth_hdr(skb)->h_proto));
if (loginfo->type == NF_LOG_TYPE_LOG)
bitmask = loginfo->u.log.logflags;
else
bitmask = NF_LOG_DEFAULT_MASK;
if ((bitmask & EBT_LOG_IP) && eth_hdr(skb)->h_proto ==
htons(ETH_P_IP)) {
const struct iphdr *ih;
struct iphdr _iph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL) {
pr_cont(" INCOMPLETE IP header");
goto out;
}
pr_cont(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d",
&ih->saddr, &ih->daddr, ih->tos, ih->protocol);
print_ports(skb, ih->protocol, ih->ihl*4);
goto out;
}
#if IS_ENABLED(CONFIG_BRIDGE_EBT_IP6)
if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
htons(ETH_P_IPV6)) {
const struct ipv6hdr *ih;
struct ipv6hdr _iph;
uint8_t nexthdr;
__be16 frag_off;
int offset_ph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL) {
pr_cont(" INCOMPLETE IPv6 header");
goto out;
}
pr_cont(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
&ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
nexthdr = ih->nexthdr;
offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
if (offset_ph == -1)
goto out;
print_ports(skb, nexthdr, offset_ph);
goto out;
}
#endif
if ((bitmask & EBT_LOG_ARP) &&
((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) ||
(eth_hdr(skb)->h_proto == htons(ETH_P_RARP)))) {
const struct arphdr *ah;
struct arphdr _arph;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL) {
pr_cont(" INCOMPLETE ARP header");
goto out;
}
pr_cont(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
ntohs(ah->ar_hrd), ntohs(ah->ar_pro),
ntohs(ah->ar_op));
/* If it's for Ethernet and the lengths are OK,
* then log the ARP payload
*/
if (ah->ar_hrd == htons(1) &&
ah->ar_hln == ETH_ALEN &&
ah->ar_pln == sizeof(__be32)) {
const struct arppayload *ap;
struct arppayload _arpp;
ap = skb_header_pointer(skb, sizeof(_arph),
sizeof(_arpp), &_arpp);
if (ap == NULL) {
pr_cont(" INCOMPLETE ARP payload");
goto out;
}
pr_cont(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
ap->mac_src, ap->ip_src,
ap->mac_dst, ap->ip_dst);
}
}
out:
pr_cont("\n");
spin_unlock_bh(&ebt_log_lock);
}
static unsigned int
ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_log_info *info = par->targinfo;
struct nf_loginfo li;
struct net *net = xt_net(par);
li.type = NF_LOG_TYPE_LOG;
li.u.log.level = info->loglevel;
li.u.log.logflags = info->bitmask;
/* Remember that we have to use ebt_log_packet() not to break backward
* compatibility. We cannot use the default bridge packet logger via
* nf_log_packet() with NFT_LOG_TYPE_LOG here. --Pablo
*/
if (info->bitmask & EBT_LOG_NFLOG)
nf_log_packet(net, NFPROTO_BRIDGE, xt_hooknum(par), skb,
xt_in(par), xt_out(par), &li, "%s",
info->prefix);
else
ebt_log_packet(net, NFPROTO_BRIDGE, xt_hooknum(par), skb,
xt_in(par), xt_out(par), &li, info->prefix);
return EBT_CONTINUE;
}
static struct xt_target ebt_log_tg_reg __read_mostly = {
.name = "log",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_log_tg,
.checkentry = ebt_log_tg_check,
.targetsize = sizeof(struct ebt_log_info),
.me = THIS_MODULE,
};
static int __init ebt_log_init(void)
{
return xt_register_target(&ebt_log_tg_reg);
}
static void __exit ebt_log_fini(void)
{
xt_unregister_target(&ebt_log_tg_reg);
}
module_init(ebt_log_init);
module_exit(ebt_log_fini);
MODULE_DESCRIPTION("Ebtables: Packet logging to syslog");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_log.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_dnat
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* June, 2002
*
*/
#include <linux/module.h>
#include <net/sock.h>
#include "../br_private.h"
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nat.h>
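/* Rewrite the destination MAC and fix up skb->pkt_type so it stays
 * consistent with the new address: broadcast/multicast targets get the
 * matching packet type, while unicast targets are classified as
 * PACKET_HOST or PACKET_OTHERHOST against the receiving device (the
 * bridge device at prerouting, the port device at brouting).
 */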
static unsigned int
ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
if (skb_ensure_writable(skb, 0))
return EBT_DROP;
ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
if (is_multicast_ether_addr(info->mac)) {
if (is_broadcast_ether_addr(info->mac))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
} else {
const struct net_device *dev;
switch (xt_hooknum(par)) {
case NF_BR_BROUTING:
dev = xt_in(par);
break;
case NF_BR_PRE_ROUTING:
dev = br_port_get_rcu(xt_in(par))->br->dev;
break;
default:
dev = NULL;
break;
}
if (!dev) /* NF_BR_LOCAL_OUT */
return info->target;
if (ether_addr_equal(info->mac, dev->dev_addr))
skb->pkt_type = PACKET_HOST;
else
skb->pkt_type = PACKET_OTHERHOST;
}
return info->target;
}
static int ebt_dnat_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
if ((strcmp(par->table, "nat") != 0 ||
(hook_mask & ~((1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_OUT)))) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
return -EINVAL;
if (ebt_invalid_target(info->target))
return -EINVAL;
return 0;
}
static struct xt_target ebt_dnat_tg_reg __read_mostly = {
.name = "dnat",
.revision = 0,
.family = NFPROTO_BRIDGE,
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING),
.target = ebt_dnat_tg,
.checkentry = ebt_dnat_tg_check,
.targetsize = sizeof(struct ebt_nat_info),
.me = THIS_MODULE,
};
static int __init ebt_dnat_init(void)
{
return xt_register_target(&ebt_dnat_tg_reg);
}
static void __exit ebt_dnat_fini(void)
{
xt_unregister_target(&ebt_dnat_tg_reg);
}
module_init(ebt_dnat_init);
module_exit(ebt_dnat_fini);
MODULE_DESCRIPTION("Ebtables: Destination MAC address translation");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_dnat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebtable_nat
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* April, 2002
*
*/
#include <linux/netfilter_bridge/ebtables.h>
#include <uapi/linux/netfilter_bridge.h>
#include <linux/module.h>
#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
(1 << NF_BR_POST_ROUTING))
static struct ebt_entries initial_chains[] = {
{
.name = "PREROUTING",
.policy = EBT_ACCEPT,
},
{
.name = "OUTPUT",
.policy = EBT_ACCEPT,
},
{
.name = "POSTROUTING",
.policy = EBT_ACCEPT,
}
};
static struct ebt_replace_kernel initial_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.entries_size = 3 * sizeof(struct ebt_entries),
.hook_entry = {
[NF_BR_PRE_ROUTING] = &initial_chains[0],
[NF_BR_LOCAL_OUT] = &initial_chains[1],
[NF_BR_POST_ROUTING] = &initial_chains[2],
},
.entries = (char *)initial_chains,
};
static const struct ebt_table frame_nat = {
.name = "nat",
.table = &initial_table,
.valid_hooks = NAT_VALID_HOOKS,
.me = THIS_MODULE,
};
static const struct nf_hook_ops ebt_ops_nat[] = {
{
.hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_OUT,
.priority = NF_BR_PRI_NAT_DST_OTHER,
},
{
.hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_NAT_SRC,
},
{
.hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_NAT_DST_BRIDGED,
},
};
static int frame_nat_table_init(struct net *net)
{
return ebt_register_table(net, &frame_nat, ebt_ops_nat);
}
static void __net_exit frame_nat_net_pre_exit(struct net *net)
{
ebt_unregister_table_pre_exit(net, "nat");
}
static void __net_exit frame_nat_net_exit(struct net *net)
{
ebt_unregister_table(net, "nat");
}
static struct pernet_operations frame_nat_net_ops = {
.exit = frame_nat_net_exit,
.pre_exit = frame_nat_net_pre_exit,
};
static int __init ebtable_nat_init(void)
{
int ret = ebt_register_template(&frame_nat, frame_nat_table_init);
if (ret)
return ret;
ret = register_pernet_subsys(&frame_nat_net_ops);
if (ret) {
ebt_unregister_template(&frame_nat);
return ret;
}
return ret;
}
static void __exit ebtable_nat_fini(void)
{
unregister_pernet_subsys(&frame_nat_net_ops);
ebt_unregister_template(&frame_nat);
}
module_init(ebtable_nat_init);
module_exit(ebtable_nat_fini);
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebtable_nat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebtable_filter
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* April, 2002
*
*/
#include <linux/netfilter_bridge/ebtables.h>
#include <uapi/linux/netfilter_bridge.h>
#include <linux/module.h>
#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
(1 << NF_BR_LOCAL_OUT))
static struct ebt_entries initial_chains[] = {
{
.name = "INPUT",
.policy = EBT_ACCEPT,
},
{
.name = "FORWARD",
.policy = EBT_ACCEPT,
},
{
.name = "OUTPUT",
.policy = EBT_ACCEPT,
},
};
static struct ebt_replace_kernel initial_table = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.entries_size = 3 * sizeof(struct ebt_entries),
.hook_entry = {
[NF_BR_LOCAL_IN] = &initial_chains[0],
[NF_BR_FORWARD] = &initial_chains[1],
[NF_BR_LOCAL_OUT] = &initial_chains[2],
},
.entries = (char *)initial_chains,
};
static const struct ebt_table frame_filter = {
.name = "filter",
.table = &initial_table,
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
};
static const struct nf_hook_ops ebt_ops_filter[] = {
{
.hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_FILTER_BRIDGED,
},
{
.hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_FILTER_BRIDGED,
},
{
.hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_OUT,
.priority = NF_BR_PRI_FILTER_OTHER,
},
};
static int frame_filter_table_init(struct net *net)
{
return ebt_register_table(net, &frame_filter, ebt_ops_filter);
}
static void __net_exit frame_filter_net_pre_exit(struct net *net)
{
ebt_unregister_table_pre_exit(net, "filter");
}
static void __net_exit frame_filter_net_exit(struct net *net)
{
ebt_unregister_table(net, "filter");
}
static struct pernet_operations frame_filter_net_ops = {
.exit = frame_filter_net_exit,
.pre_exit = frame_filter_net_pre_exit,
};
static int __init ebtable_filter_init(void)
{
int ret = ebt_register_template(&frame_filter, frame_filter_table_init);
if (ret)
return ret;
ret = register_pernet_subsys(&frame_filter_net_ops);
if (ret) {
ebt_unregister_template(&frame_filter);
return ret;
}
return 0;
}
static void __exit ebtable_filter_fini(void)
{
unregister_pernet_subsys(&frame_filter_net_ops);
ebt_unregister_template(&frame_filter);
}
module_init(ebtable_filter_init);
module_exit(ebtable_filter_fini);
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebtable_filter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_stp
*
* Authors:
* Bart De Schuymer <[email protected]>
* Stephen Hemminger <[email protected]>
*
* July, 2003
*/
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_stp.h>
#define BPDU_TYPE_CONFIG 0
struct stp_header {
u8 dsap;
u8 ssap;
u8 ctrl;
u8 pid;
u8 vers;
u8 type;
};
struct stp_config_pdu {
u8 flags;
u8 root[8];
u8 root_cost[4];
u8 sender[8];
u8 port[2];
u8 msg_age[2];
u8 max_age[2];
u8 hello_time[2];
u8 forward_delay[2];
};
#define NR16(p) (p[0] << 8 | p[1])
#define NR32(p) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
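/* NR16/NR32 assemble big-endian 16/32-bit values from the raw BPDU byte
 * arrays, e.g. NR16 of {0x80, 0x00} yields 0x8000.
 */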
static bool ebt_filter_config(const struct ebt_stp_info *info,
const struct stp_config_pdu *stpc)
{
const struct ebt_stp_config_info *c;
u16 v16;
u32 v32;
c = &info->config;
if ((info->bitmask & EBT_STP_FLAGS) &&
NF_INVF(info, EBT_STP_FLAGS, c->flags != stpc->flags))
return false;
if (info->bitmask & EBT_STP_ROOTPRIO) {
v16 = NR16(stpc->root);
if (NF_INVF(info, EBT_STP_ROOTPRIO,
v16 < c->root_priol || v16 > c->root_priou))
return false;
}
if (info->bitmask & EBT_STP_ROOTADDR) {
if (NF_INVF(info, EBT_STP_ROOTADDR,
!ether_addr_equal_masked(&stpc->root[2],
c->root_addr,
c->root_addrmsk)))
return false;
}
if (info->bitmask & EBT_STP_ROOTCOST) {
v32 = NR32(stpc->root_cost);
if (NF_INVF(info, EBT_STP_ROOTCOST,
v32 < c->root_costl || v32 > c->root_costu))
return false;
}
if (info->bitmask & EBT_STP_SENDERPRIO) {
v16 = NR16(stpc->sender);
if (NF_INVF(info, EBT_STP_SENDERPRIO,
v16 < c->sender_priol || v16 > c->sender_priou))
return false;
}
if (info->bitmask & EBT_STP_SENDERADDR) {
if (NF_INVF(info, EBT_STP_SENDERADDR,
!ether_addr_equal_masked(&stpc->sender[2],
c->sender_addr,
c->sender_addrmsk)))
return false;
}
if (info->bitmask & EBT_STP_PORT) {
v16 = NR16(stpc->port);
if (NF_INVF(info, EBT_STP_PORT,
v16 < c->portl || v16 > c->portu))
return false;
}
if (info->bitmask & EBT_STP_MSGAGE) {
v16 = NR16(stpc->msg_age);
if (NF_INVF(info, EBT_STP_MSGAGE,
v16 < c->msg_agel || v16 > c->msg_ageu))
return false;
}
if (info->bitmask & EBT_STP_MAXAGE) {
v16 = NR16(stpc->max_age);
if (NF_INVF(info, EBT_STP_MAXAGE,
v16 < c->max_agel || v16 > c->max_ageu))
return false;
}
if (info->bitmask & EBT_STP_HELLOTIME) {
v16 = NR16(stpc->hello_time);
if (NF_INVF(info, EBT_STP_HELLOTIME,
v16 < c->hello_timel || v16 > c->hello_timeu))
return false;
}
if (info->bitmask & EBT_STP_FWDD) {
v16 = NR16(stpc->forward_delay);
if (NF_INVF(info, EBT_STP_FWDD,
v16 < c->forward_delayl || v16 > c->forward_delayu))
return false;
}
return true;
}
static bool
ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_stp_info *info = par->matchinfo;
const struct stp_header *sp;
struct stp_header _stph;
const u8 header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
sp = skb_header_pointer(skb, 0, sizeof(_stph), &_stph);
if (sp == NULL)
return false;
/* Only frames carrying the standard STP LLC/BPDU header bytes are considered */
if (memcmp(sp, header, sizeof(header)))
return false;
if ((info->bitmask & EBT_STP_TYPE) &&
NF_INVF(info, EBT_STP_TYPE, info->type != sp->type))
return false;
if (sp->type == BPDU_TYPE_CONFIG &&
info->bitmask & EBT_STP_CONFIG_MASK) {
const struct stp_config_pdu *st;
struct stp_config_pdu _stpc;
st = skb_header_pointer(skb, sizeof(_stph),
sizeof(_stpc), &_stpc);
if (st == NULL)
return false;
return ebt_filter_config(info, st);
}
return true;
}
static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_stp_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
!(info->bitmask & EBT_STP_MASK))
return -EINVAL;
/* Make sure the match only receives stp frames */
if (!par->nft_compat &&
(!ether_addr_equal(e->destmac, eth_stp_addr) ||
!(e->bitmask & EBT_DESTMAC) ||
!is_broadcast_ether_addr(e->destmsk)))
return -EINVAL;
return 0;
}
static struct xt_match ebt_stp_mt_reg __read_mostly = {
.name = "stp",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_stp_mt,
.checkentry = ebt_stp_mt_check,
.matchsize = sizeof(struct ebt_stp_info),
.me = THIS_MODULE,
};
static int __init ebt_stp_init(void)
{
return xt_register_match(&ebt_stp_mt_reg);
}
static void __exit ebt_stp_fini(void)
{
xt_unregister_match(&ebt_stp_mt_reg);
}
module_init(ebt_stp_init);
module_exit(ebt_stp_fini);
MODULE_DESCRIPTION("Ebtables: Spanning Tree Protocol packet match");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_stp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_mark
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* July, 2002
*
*/
/* The mark target can be used in any chain; I believe adding a mangle
 * table just for marking is total overkill.
 * Marking a frame doesn't really change anything in the frame anyway.
 */
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_mark_t.h>
static unsigned int
ebt_mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_mark_t_info *info = par->targinfo;
int action = info->target & -16;
if (action == MARK_SET_VALUE)
skb->mark = info->mark;
else if (action == MARK_OR_VALUE)
skb->mark |= info->mark;
else if (action == MARK_AND_VALUE)
skb->mark &= info->mark;
else
skb->mark ^= info->mark;
return info->target | ~EBT_VERDICT_BITS;
}
static int ebt_mark_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_mark_t_info *info = par->targinfo;
int tmp;
tmp = info->target | ~EBT_VERDICT_BITS;
if (BASE_CHAIN && tmp == EBT_RETURN)
return -EINVAL;
if (ebt_invalid_target(tmp))
return -EINVAL;
tmp = info->target & ~EBT_VERDICT_BITS;
if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
return -EINVAL;
return 0;
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct compat_ebt_mark_t_info {
compat_ulong_t mark;
compat_uint_t target;
};
static void mark_tg_compat_from_user(void *dst, const void *src)
{
const struct compat_ebt_mark_t_info *user = src;
struct ebt_mark_t_info *kern = dst;
kern->mark = user->mark;
kern->target = user->target;
}
static int mark_tg_compat_to_user(void __user *dst, const void *src)
{
struct compat_ebt_mark_t_info __user *user = dst;
const struct ebt_mark_t_info *kern = src;
if (put_user(kern->mark, &user->mark) ||
put_user(kern->target, &user->target))
return -EFAULT;
return 0;
}
#endif
static struct xt_target ebt_mark_tg_reg __read_mostly = {
.name = "mark",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_mark_tg,
.checkentry = ebt_mark_tg_check,
.targetsize = sizeof(struct ebt_mark_t_info),
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
.compatsize = sizeof(struct compat_ebt_mark_t_info),
.compat_from_user = mark_tg_compat_from_user,
.compat_to_user = mark_tg_compat_to_user,
#endif
.me = THIS_MODULE,
};
static int __init ebt_mark_init(void)
{
return xt_register_target(&ebt_mark_tg_reg);
}
static void __exit ebt_mark_fini(void)
{
xt_unregister_target(&ebt_mark_tg_reg);
}
module_init(ebt_mark_init);
module_exit(ebt_mark_fini);
MODULE_DESCRIPTION("Ebtables: Packet mark modification");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_mark.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_arpreply
*
* Authors:
* Grzegorz Borowiak <[email protected]>
* Bart De Schuymer <[email protected]>
*
* August, 2003
*
*/
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_arpreply.h>
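/* Answer Ethernet/IPv4 ARP requests on behalf of the configured MAC: a
 * reply claiming info->mac owns the requested IP is sent back to the
 * requester, then the configured verdict is applied to the request.
 */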
static unsigned int
ebt_arpreply_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_arpreply_info *info = par->targinfo;
const __be32 *siptr, *diptr;
__be32 _sip, _dip;
const struct arphdr *ap;
struct arphdr _ah;
const unsigned char *shp;
unsigned char _sha[ETH_ALEN];
ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
if (ap == NULL)
return EBT_DROP;
if (ap->ar_op != htons(ARPOP_REQUEST) ||
ap->ar_hln != ETH_ALEN ||
ap->ar_pro != htons(ETH_P_IP) ||
ap->ar_pln != 4)
return EBT_CONTINUE;
shp = skb_header_pointer(skb, sizeof(_ah), ETH_ALEN, &_sha);
if (shp == NULL)
return EBT_DROP;
siptr = skb_header_pointer(skb, sizeof(_ah) + ETH_ALEN,
sizeof(_sip), &_sip);
if (siptr == NULL)
return EBT_DROP;
diptr = skb_header_pointer(skb,
sizeof(_ah) + 2 * ETH_ALEN + sizeof(_sip),
sizeof(_dip), &_dip);
if (diptr == NULL)
return EBT_DROP;
arp_send(ARPOP_REPLY, ETH_P_ARP, *siptr,
(struct net_device *)xt_in(par),
*diptr, shp, info->mac, shp);
return info->target;
}
static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_arpreply_info *info = par->targinfo;
const struct ebt_entry *e = par->entryinfo;
if (BASE_CHAIN && info->target == EBT_RETURN)
return -EINVAL;
if (e->ethproto != htons(ETH_P_ARP) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
if (ebt_invalid_target(info->target))
return -EINVAL;
return 0;
}
static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
.name = "arpreply",
.revision = 0,
.family = NFPROTO_BRIDGE,
.table = "nat",
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING),
.target = ebt_arpreply_tg,
.checkentry = ebt_arpreply_tg_check,
.targetsize = sizeof(struct ebt_arpreply_info),
.me = THIS_MODULE,
};
static int __init ebt_arpreply_init(void)
{
return xt_register_target(&ebt_arpreply_tg_reg);
}
static void __exit ebt_arpreply_fini(void)
{
xt_unregister_target(&ebt_arpreply_tg_reg);
}
module_init(ebt_arpreply_init);
module_exit(ebt_arpreply_fini);
MODULE_DESCRIPTION("Ebtables: ARP reply target");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_arpreply.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_ip
*
* Authors:
* Bart De Schuymer <[email protected]>
*
* April, 2002
*
* Changes:
* added ip-sport and ip-dport
* Innominate Security Technologies AG <[email protected]>
* September, 2002
*/
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/in.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip.h>
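/* Only the first bytes of the transport header are ever inspected: the
 * src/dst ports shared by TCP, UDP, UDPLITE, SCTP and DCCP, the ICMP
 * type/code, or the IGMP type - hence the small union below.
 */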
union pkthdr {
struct {
__be16 src;
__be16 dst;
} tcpudphdr;
struct {
u8 type;
u8 code;
} icmphdr;
struct {
u8 type;
} igmphdr;
};
static bool
ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct iphdr *ih;
struct iphdr _iph;
const union pkthdr *pptr;
union pkthdr _pkthdr;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL)
return false;
if ((info->bitmask & EBT_IP_TOS) &&
NF_INVF(info, EBT_IP_TOS, info->tos != ih->tos))
return false;
if ((info->bitmask & EBT_IP_SOURCE) &&
NF_INVF(info, EBT_IP_SOURCE,
(ih->saddr & info->smsk) != info->saddr))
return false;
if ((info->bitmask & EBT_IP_DEST) &&
NF_INVF(info, EBT_IP_DEST,
(ih->daddr & info->dmsk) != info->daddr))
return false;
if (info->bitmask & EBT_IP_PROTO) {
if (NF_INVF(info, EBT_IP_PROTO, info->protocol != ih->protocol))
return false;
if (!(info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT |
EBT_IP_ICMP | EBT_IP_IGMP)))
return true;
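/* Only the first fragment carries the transport header, so port and
 * ICMP/IGMP criteria can never match later fragments.
 */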
if (ntohs(ih->frag_off) & IP_OFFSET)
return false;
/* min icmp/igmp headersize is 4, so sizeof(_pkthdr) is ok. */
pptr = skb_header_pointer(skb, ih->ihl*4,
sizeof(_pkthdr), &_pkthdr);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP_DPORT) {
u32 dst = ntohs(pptr->tcpudphdr.dst);
if (NF_INVF(info, EBT_IP_DPORT,
dst < info->dport[0] ||
dst > info->dport[1]))
return false;
}
if (info->bitmask & EBT_IP_SPORT) {
u32 src = ntohs(pptr->tcpudphdr.src);
if (NF_INVF(info, EBT_IP_SPORT,
src < info->sport[0] ||
src > info->sport[1]))
return false;
}
if ((info->bitmask & EBT_IP_ICMP) &&
NF_INVF(info, EBT_IP_ICMP,
pptr->icmphdr.type < info->icmp_type[0] ||
pptr->icmphdr.type > info->icmp_type[1] ||
pptr->icmphdr.code < info->icmp_code[0] ||
pptr->icmphdr.code > info->icmp_code[1]))
return false;
if ((info->bitmask & EBT_IP_IGMP) &&
NF_INVF(info, EBT_IP_IGMP,
pptr->igmphdr.type < info->igmp_type[0] ||
pptr->igmphdr.type > info->igmp_type[1]))
return false;
}
return true;
}
static int ebt_ip_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
if (e->ethproto != htons(ETH_P_IP) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
if (info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK)
return -EINVAL;
if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) {
if (info->invflags & EBT_IP_PROTO)
return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
return -EINVAL;
}
if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
return -EINVAL;
if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
if (info->bitmask & EBT_IP_ICMP) {
if ((info->invflags & EBT_IP_PROTO) ||
info->protocol != IPPROTO_ICMP)
return -EINVAL;
if (info->icmp_type[0] > info->icmp_type[1] ||
info->icmp_code[0] > info->icmp_code[1])
return -EINVAL;
}
if (info->bitmask & EBT_IP_IGMP) {
if ((info->invflags & EBT_IP_PROTO) ||
info->protocol != IPPROTO_IGMP)
return -EINVAL;
if (info->igmp_type[0] > info->igmp_type[1])
return -EINVAL;
}
return 0;
}
static struct xt_match ebt_ip_mt_reg __read_mostly = {
.name = "ip",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_ip_mt,
.checkentry = ebt_ip_mt_check,
.matchsize = sizeof(struct ebt_ip_info),
.me = THIS_MODULE,
};
static int __init ebt_ip_init(void)
{
return xt_register_match(&ebt_ip_mt_reg);
}
static void __exit ebt_ip_fini(void)
{
xt_unregister_match(&ebt_ip_mt_reg);
}
module_init(ebt_ip_init);
module_exit(ebt_ip_fini);
MODULE_DESCRIPTION("Ebtables: IPv4 protocol packet match");
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebt_ip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ebt_nflog
*
* Author:
* Peter Warasin <[email protected]>
*
* February, 2008
*
* Based on:
* xt_NFLOG.c, (C) 2006 by Patrick McHardy <[email protected]>
* ebt_ulog.c, (C) 2004 by Bart De Schuymer <[email protected]>
*
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nflog.h>
#include <net/netfilter/nf_log.h>
static unsigned int
ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nflog_info *info = par->targinfo;
struct net *net = xt_net(par);
struct nf_loginfo li;
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
li.u.ulog.flags = 0;
nf_log_packet(net, PF_BRIDGE, xt_hooknum(par), skb, xt_in(par),
xt_out(par), &li, "%s", info->prefix);
return EBT_CONTINUE;
}
static int ebt_nflog_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_nflog_info *info = par->targinfo;
if (info->flags & ~EBT_NFLOG_MASK)
return -EINVAL;
info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';
return 0;
}
static struct xt_target ebt_nflog_tg_reg __read_mostly = {
.name = "nflog",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_nflog_tg,
.checkentry = ebt_nflog_tg_check,
.targetsize = sizeof(struct ebt_nflog_info),
.me = THIS_MODULE,
};
static int __init ebt_nflog_init(void)
{
return xt_register_target(&ebt_nflog_tg_reg);
}
static void __exit ebt_nflog_fini(void)
{
xt_unregister_target(&ebt_nflog_tg_reg);
}
module_init(ebt_nflog_init);
module_exit(ebt_nflog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Warasin <[email protected]>");
MODULE_DESCRIPTION("ebtables NFLOG netfilter logging module");
| linux-master | net/bridge/netfilter/ebt_nflog.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ebtables
*
* Author:
* Bart De Schuymer <[email protected]>
*
* ebtables.c,v 2.0, July, 2002
*
* This code is strongly inspired by the iptables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/audit.h>
#include <net/sock.h>
#include <net/netns/generic.h>
/* needed for logical [in,out]-dev filtering */
#include "../br_private.h"
/* Each cpu has its own set of counters, so there is no need for a write_lock
 * in the softirq path.
 * For reading or updating the counters, the user context needs to
 * take the write_lock.
 */
/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
COUNTER_OFFSET(n) * cpu))
struct ebt_pernet {
struct list_head tables;
};
struct ebt_template {
struct list_head list;
char name[EBT_TABLE_MAXNAMELEN];
struct module *owner;
/* called when table is needed in the given netns */
int (*table_init)(struct net *net);
};
static unsigned int ebt_pernet_id __read_mostly;
static LIST_HEAD(template_tables);
static DEFINE_MUTEX(ebt_mutex);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
static void ebt_standard_compat_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;
if (v >= 0)
v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
memcpy(dst, &v, sizeof(v));
}
static int ebt_standard_compat_to_user(void __user *dst, const void *src)
{
compat_int_t cv = *(int *)src;
if (cv >= 0)
cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
#endif
static struct xt_target ebt_standard_target = {
.name = "standard",
.revision = 0,
.family = NFPROTO_BRIDGE,
.targetsize = sizeof(int),
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
.compatsize = sizeof(compat_int_t),
.compat_from_user = ebt_standard_compat_from_user,
.compat_to_user = ebt_standard_compat_to_user,
#endif
};
static inline int
ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
struct xt_action_param *par)
{
par->target = w->u.watcher;
par->targinfo = w->data;
w->u.watcher->target(skb, par);
/* watchers don't give a verdict */
return 0;
}
static inline int
ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
struct xt_action_param *par)
{
par->match = m->u.match;
par->matchinfo = m->data;
return !m->u.match->match(skb, par);
}
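/* Compare a rule's interface name against a device.  An empty name always
 * matches, a non-empty name never matches a missing device, and a byte
 * value of 1 in the name acts as a wildcard matching any remaining suffix
 * of the device name.  Returns 0 on match.
 */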
static inline int
ebt_dev_check(const char *entry, const struct net_device *device)
{
int i = 0;
const char *devname;
if (*entry == '\0')
return 0;
if (!device)
return 1;
devname = device->name;
/* 1 is the wildcard token */
while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
i++;
return devname[i] != entry[i] && entry[i] != 1;
}
/* process standard matches */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
const struct net_device *in, const struct net_device *out)
{
const struct ethhdr *h = eth_hdr(skb);
const struct net_bridge_port *p;
__be16 ethproto;
if (skb_vlan_tag_present(skb))
ethproto = htons(ETH_P_8021Q);
else
ethproto = h->h_proto;
if (e->bitmask & EBT_802_3) {
if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto)))
return 1;
} else if (!(e->bitmask & EBT_NOPROTO) &&
NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto))
return 1;
if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in)))
return 1;
if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
return 1;
/* rcu_read_lock()ed by nf_hook_thresh */
if (in && (p = br_port_get_rcu(in)) != NULL &&
NF_INVF(e, EBT_ILOGICALIN,
ebt_dev_check(e->logical_in, p->br->dev)))
return 1;
if (out && (p = br_port_get_rcu(out)) != NULL &&
NF_INVF(e, EBT_ILOGICALOUT,
ebt_dev_check(e->logical_out, p->br->dev)))
return 1;
if (e->bitmask & EBT_SOURCEMAC) {
if (NF_INVF(e, EBT_ISOURCE,
!ether_addr_equal_masked(h->h_source, e->sourcemac,
e->sourcemsk)))
return 1;
}
if (e->bitmask & EBT_DESTMAC) {
if (NF_INVF(e, EBT_IDEST,
!ether_addr_equal_masked(h->h_dest, e->destmac,
e->destmsk)))
return 1;
}
return 0;
}
static inline
struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
{
return (void *)entry + entry->next_offset;
}
static inline const struct ebt_entry_target *
ebt_get_target_c(const struct ebt_entry *e)
{
return ebt_get_target((struct ebt_entry *)e);
}
/* Do some firewalling */
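/* Verdict encoding used below: EBT_ACCEPT, EBT_DROP, EBT_CONTINUE and
 * EBT_RETURN are negative constants, while a non-negative verdict is a
 * byte offset into private->entries pointing at a user-defined chain.
 */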
unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct ebt_table *table = priv;
unsigned int hook = state->hook;
int i, nentries;
struct ebt_entry *point;
struct ebt_counter *counter_base, *cb_base;
const struct ebt_entry_target *t;
int verdict, sp = 0;
struct ebt_chainstack *cs;
struct ebt_entries *chaininfo;
const char *base;
const struct ebt_table_info *private;
struct xt_action_param acpar;
acpar.state = state;
acpar.hotdrop = false;
read_lock_bh(&table->lock);
private = table->private;
cb_base = COUNTER_BASE(private->counters, private->nentries,
smp_processor_id());
if (private->chainstack)
cs = private->chainstack[smp_processor_id()];
else
cs = NULL;
chaininfo = private->hook_entry[hook];
nentries = private->hook_entry[hook]->nentries;
point = (struct ebt_entry *)(private->hook_entry[hook]->data);
counter_base = cb_base + private->hook_entry[hook]->counter_offset;
/* base for chain jumps */
base = private->entries;
i = 0;
while (i < nentries) {
if (ebt_basic_match(point, skb, state->in, state->out))
goto letscontinue;
if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
goto letscontinue;
if (acpar.hotdrop) {
read_unlock_bh(&table->lock);
return NF_DROP;
}
ADD_COUNTER(*(counter_base + i), skb->len, 1);
/* these should only watch: not modify, nor tell us
* what to do with the packet
*/
EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
t = ebt_get_target_c(point);
/* standard target */
if (!t->u.target->target)
verdict = ((struct ebt_standard_target *)t)->verdict;
else {
acpar.target = t->u.target;
acpar.targinfo = t->data;
verdict = t->u.target->target(skb, &acpar);
}
if (verdict == EBT_ACCEPT) {
read_unlock_bh(&table->lock);
return NF_ACCEPT;
}
if (verdict == EBT_DROP) {
read_unlock_bh(&table->lock);
return NF_DROP;
}
if (verdict == EBT_RETURN) {
letsreturn:
if (WARN(sp == 0, "RETURN on base chain")) {
/* act like this is EBT_CONTINUE */
goto letscontinue;
}
sp--;
/* put all the local variables right */
i = cs[sp].n;
chaininfo = cs[sp].chaininfo;
nentries = chaininfo->nentries;
point = cs[sp].e;
counter_base = cb_base +
chaininfo->counter_offset;
continue;
}
if (verdict == EBT_CONTINUE)
goto letscontinue;
if (WARN(verdict < 0, "bogus standard verdict\n")) {
read_unlock_bh(&table->lock);
return NF_DROP;
}
/* jump to a user-defined chain (udc) */
cs[sp].n = i + 1;
cs[sp].chaininfo = chaininfo;
cs[sp].e = ebt_next_entry(point);
i = 0;
chaininfo = (struct ebt_entries *) (base + verdict);
if (WARN(chaininfo->distinguisher, "jump to non-chain\n")) {
read_unlock_bh(&table->lock);
return NF_DROP;
}
nentries = chaininfo->nentries;
point = (struct ebt_entry *)chaininfo->data;
counter_base = cb_base + chaininfo->counter_offset;
sp++;
continue;
letscontinue:
point = ebt_next_entry(point);
i++;
}
/* I actually like this :) */
if (chaininfo->policy == EBT_RETURN)
goto letsreturn;
if (chaininfo->policy == EBT_ACCEPT) {
read_unlock_bh(&table->lock);
return NF_ACCEPT;
}
read_unlock_bh(&table->lock);
return NF_DROP;
}
/* If it succeeds, returns element and locks mutex */
static inline void *
find_inlist_lock_noload(struct net *net, const char *name, int *error,
struct mutex *mutex)
{
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
struct ebt_template *tmpl;
struct ebt_table *table;
mutex_lock(mutex);
list_for_each_entry(table, &ebt_net->tables, list) {
if (strcmp(table->name, name) == 0)
return table;
}
list_for_each_entry(tmpl, &template_tables, list) {
if (strcmp(name, tmpl->name) == 0) {
struct module *owner = tmpl->owner;
if (!try_module_get(owner))
goto out;
mutex_unlock(mutex);
*error = tmpl->table_init(net);
if (*error) {
module_put(owner);
return NULL;
}
mutex_lock(mutex);
module_put(owner);
break;
}
}
list_for_each_entry(table, &ebt_net->tables, list) {
if (strcmp(table->name, name) == 0)
return table;
}
out:
*error = -ENOENT;
mutex_unlock(mutex);
return NULL;
}
static void *
find_inlist_lock(struct net *net, const char *name, const char *prefix,
int *error, struct mutex *mutex)
{
return try_then_request_module(
find_inlist_lock_noload(net, name, error, mutex),
"%s%s", prefix, name);
}
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
struct mutex *mutex)
{
return find_inlist_lock(net, name, "ebtable_", error, mutex);
}
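/* Typical use (illustrative sketch): callers do
*
*	t = find_table_lock(net, "filter", &ret, &ebt_mutex);
*
* which returns the table with ebt_mutex held, auto-loading the
* "ebtable_filter" module via the template list or request_module() when the
* table is not registered yet.
*/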
static inline void ebt_free_table_info(struct ebt_table_info *info)
{
int i;
if (info->chainstack) {
for_each_possible_cpu(i)
vfree(info->chainstack[i]);
vfree(info->chainstack);
}
}
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
unsigned int *cnt)
{
const struct ebt_entry *e = par->entryinfo;
struct xt_match *match;
size_t left = ((char *)e + e->watchers_offset) - (char *)m;
int ret;
if (left < sizeof(struct ebt_entry_match) ||
left - sizeof(struct ebt_entry_match) < m->match_size)
return -EINVAL;
match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision);
if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
if (!IS_ERR(match))
module_put(match->me);
request_module("ebt_%s", m->u.name);
match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision);
}
if (IS_ERR(match))
return PTR_ERR(match);
m->u.match = match;
par->match = match;
par->matchinfo = m->data;
ret = xt_check_match(par, m->match_size,
ntohs(e->ethproto), e->invflags & EBT_IPROTO);
if (ret < 0) {
module_put(match->me);
return ret;
}
(*cnt)++;
return 0;
}
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
unsigned int *cnt)
{
const struct ebt_entry *e = par->entryinfo;
struct xt_target *watcher;
size_t left = ((char *)e + e->target_offset) - (char *)w;
int ret;
if (left < sizeof(struct ebt_entry_watcher) ||
left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
return -EINVAL;
watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
if (IS_ERR(watcher))
return PTR_ERR(watcher);
if (watcher->family != NFPROTO_BRIDGE) {
module_put(watcher->me);
return -ENOENT;
}
w->u.watcher = watcher;
par->target = watcher;
par->targinfo = w->data;
ret = xt_check_target(par, w->watcher_size,
ntohs(e->ethproto), e->invflags & EBT_IPROTO);
if (ret < 0) {
module_put(watcher->me);
return ret;
}
(*cnt)++;
return 0;
}
static int ebt_verify_pointers(const struct ebt_replace *repl,
struct ebt_table_info *newinfo)
{
unsigned int limit = repl->entries_size;
unsigned int valid_hooks = repl->valid_hooks;
unsigned int offset = 0;
int i;
for (i = 0; i < NF_BR_NUMHOOKS; i++)
newinfo->hook_entry[i] = NULL;
newinfo->entries_size = repl->entries_size;
newinfo->nentries = repl->nentries;
while (offset < limit) {
size_t left = limit - offset;
struct ebt_entry *e = (void *)newinfo->entries + offset;
if (left < sizeof(unsigned int))
break;
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
if ((valid_hooks & (1 << i)) == 0)
continue;
if ((char __user *)repl->hook_entry[i] ==
repl->entries + offset)
break;
}
if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
if (e->bitmask != 0) {
/* we make userspace set this right,
* so there is no misunderstanding
*/
return -EINVAL;
}
if (i != NF_BR_NUMHOOKS)
newinfo->hook_entry[i] = (struct ebt_entries *)e;
if (left < sizeof(struct ebt_entries))
break;
offset += sizeof(struct ebt_entries);
} else {
if (left < sizeof(struct ebt_entry))
break;
if (left < e->next_offset)
break;
if (e->next_offset < sizeof(struct ebt_entry))
return -EINVAL;
offset += e->next_offset;
}
}
if (offset != limit)
return -EINVAL;
/* check if all valid hooks have a chain */
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
if (!newinfo->hook_entry[i] &&
(valid_hooks & (1 << i)))
return -EINVAL;
}
return 0;
}
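/* Illustrative sketch of the entries blob that ebt_verify_pointers() walks:
*
*	struct ebt_entries	<- hook_entry[i] for a base chain, bitmask == 0
*	struct ebt_entry ...	<- rules, bitmask has EBT_ENTRY_OR_ENTRIES set
*	struct ebt_entries	<- next base chain or a user-defined chain
*	...
*
* Chain headers are recognised either by matching a hook_entry pointer sent
* by userspace or by a zero bitmask; everything else must be a rule.
*/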
/* this one is very careful, as it is the first function
* to parse the userspace data
*/
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
const struct ebt_table_info *newinfo,
unsigned int *n, unsigned int *cnt,
unsigned int *totalcnt, unsigned int *udc_cnt)
{
int i;
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
if ((void *)e == (void *)newinfo->hook_entry[i])
break;
}
/* beginning of a new chain
* if i == NF_BR_NUMHOOKS it must be a user defined chain
*/
if (i != NF_BR_NUMHOOKS || !e->bitmask) {
/* this checks if the previous chain has as many entries
* as it said it has
*/
if (*n != *cnt)
return -EINVAL;
if (((struct ebt_entries *)e)->policy != EBT_DROP &&
((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
/* only RETURN from udc */
if (i != NF_BR_NUMHOOKS ||
((struct ebt_entries *)e)->policy != EBT_RETURN)
return -EINVAL;
}
if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
(*udc_cnt)++;
if (((struct ebt_entries *)e)->counter_offset != *totalcnt)
return -EINVAL;
*n = ((struct ebt_entries *)e)->nentries;
*cnt = 0;
return 0;
}
/* a plain old entry, heh */
if (sizeof(struct ebt_entry) > e->watchers_offset ||
e->watchers_offset > e->target_offset ||
e->target_offset >= e->next_offset)
return -EINVAL;
/* this is not checked anywhere else */
if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target))
return -EINVAL;
(*cnt)++;
(*totalcnt)++;
return 0;
}
struct ebt_cl_stack {
struct ebt_chainstack cs;
int from;
unsigned int hookmask;
};
/* We need these positions to check that a jump to a different part of the
* entries is a jump to the beginning of a new chain.
*/
static inline int
ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
unsigned int *n, struct ebt_cl_stack *udc)
{
int i;
/* we're only interested in chain starts */
if (e->bitmask)
return 0;
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
break;
}
/* only care about udc */
if (i != NF_BR_NUMHOOKS)
return 0;
udc[*n].cs.chaininfo = (struct ebt_entries *)e;
/* check_chainloops() relies on these initialisations later on */
udc[*n].cs.n = 0;
udc[*n].hookmask = 0;
(*n)++;
return 0;
}
static inline int
ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
{
struct xt_mtdtor_param par;
if (i && (*i)-- == 0)
return 1;
par.net = net;
par.match = m->u.match;
par.matchinfo = m->data;
par.family = NFPROTO_BRIDGE;
if (par.match->destroy != NULL)
par.match->destroy(&par);
module_put(par.match->me);
return 0;
}
static inline int
ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
{
struct xt_tgdtor_param par;
if (i && (*i)-- == 0)
return 1;
par.net = net;
par.target = w->u.watcher;
par.targinfo = w->data;
par.family = NFPROTO_BRIDGE;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
return 0;
}
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
struct xt_tgdtor_param par;
struct ebt_entry_target *t;
if (e->bitmask == 0)
return 0;
/* we're done */
if (cnt && (*cnt)-- == 0)
return 1;
EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
t = ebt_get_target(e);
par.net = net;
par.target = t->u.target;
par.targinfo = t->data;
par.family = NFPROTO_BRIDGE;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
return 0;
}
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
const struct ebt_table_info *newinfo,
const char *name, unsigned int *cnt,
struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
struct ebt_entry_target *t;
struct xt_target *target;
unsigned int i, j, hook = 0, hookmask = 0;
size_t gap;
int ret;
struct xt_mtchk_param mtpar;
struct xt_tgchk_param tgpar;
/* don't mess with the struct ebt_entries */
if (e->bitmask == 0)
return 0;
if (e->bitmask & ~EBT_F_MASK)
return -EINVAL;
if (e->invflags & ~EBT_INV_MASK)
return -EINVAL;
if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3))
return -EINVAL;
/* what hook do we belong to? */
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
if (!newinfo->hook_entry[i])
continue;
if ((char *)newinfo->hook_entry[i] < (char *)e)
hook = i;
else
break;
}
/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
* a base chain
*/
if (i < NF_BR_NUMHOOKS)
hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
else {
for (i = 0; i < udc_cnt; i++)
if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
break;
if (i == 0)
hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
else
hookmask = cl_s[i - 1].hookmask;
}
i = 0;
memset(&mtpar, 0, sizeof(mtpar));
memset(&tgpar, 0, sizeof(tgpar));
mtpar.net = tgpar.net = net;
mtpar.table = tgpar.table = name;
mtpar.entryinfo = tgpar.entryinfo = e;
mtpar.hook_mask = tgpar.hook_mask = hookmask;
mtpar.family = tgpar.family = NFPROTO_BRIDGE;
ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
if (ret != 0)
goto cleanup_matches;
j = 0;
ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
if (ret != 0)
goto cleanup_watchers;
t = ebt_get_target(e);
gap = e->next_offset - e->target_offset;
target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
if (IS_ERR(target)) {
ret = PTR_ERR(target);
goto cleanup_watchers;
}
/* Reject UNSPEC, xtables verdicts/return values are incompatible */
if (target->family != NFPROTO_BRIDGE) {
module_put(target->me);
ret = -ENOENT;
goto cleanup_watchers;
}
t->u.target = target;
if (t->u.target == &ebt_standard_target) {
if (gap < sizeof(struct ebt_standard_target)) {
ret = -EFAULT;
goto cleanup_watchers;
}
if (((struct ebt_standard_target *)t)->verdict <
-NUM_STANDARD_TARGETS) {
ret = -EFAULT;
goto cleanup_watchers;
}
} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
module_put(t->u.target->me);
ret = -EFAULT;
goto cleanup_watchers;
}
tgpar.target = target;
tgpar.targinfo = t->data;
ret = xt_check_target(&tgpar, t->target_size,
ntohs(e->ethproto), e->invflags & EBT_IPROTO);
if (ret < 0) {
module_put(target->me);
goto cleanup_watchers;
}
(*cnt)++;
return 0;
cleanup_watchers:
EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
return ret;
}
/* checks for loops and sets the hook mask for udc
* the hook mask for udc tells us from which base chains the udc can be
* accessed. This mask is a parameter to the check() functions of the extensions
*/
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
unsigned int udc_cnt, unsigned int hooknr, char *base)
{
int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
const struct ebt_entry *e = (struct ebt_entry *)chain->data;
const struct ebt_entry_target *t;
while (pos < nentries || chain_nr != -1) {
/* end of udc, go back one 'recursion' step */
if (pos == nentries) {
/* restore the values from the time when this chain was called */
e = cl_s[chain_nr].cs.e;
if (cl_s[chain_nr].from != -1)
nentries =
cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
else
nentries = chain->nentries;
pos = cl_s[chain_nr].cs.n;
/* make sure we won't see a loop that isn't one */
cl_s[chain_nr].cs.n = 0;
chain_nr = cl_s[chain_nr].from;
if (pos == nentries)
continue;
}
t = ebt_get_target_c(e);
if (strcmp(t->u.name, EBT_STANDARD_TARGET))
goto letscontinue;
if (e->target_offset + sizeof(struct ebt_standard_target) >
e->next_offset)
return -1;
verdict = ((struct ebt_standard_target *)t)->verdict;
if (verdict >= 0) { /* jump to another chain */
struct ebt_entries *hlp2 =
(struct ebt_entries *)(base + verdict);
for (i = 0; i < udc_cnt; i++)
if (hlp2 == cl_s[i].cs.chaininfo)
break;
/* bad destination or loop */
if (i == udc_cnt)
return -1;
if (cl_s[i].cs.n)
return -1;
if (cl_s[i].hookmask & (1 << hooknr))
goto letscontinue;
/* this can't be 0, so the loop test is correct */
cl_s[i].cs.n = pos + 1;
pos = 0;
cl_s[i].cs.e = ebt_next_entry(e);
e = (struct ebt_entry *)(hlp2->data);
nentries = hlp2->nentries;
cl_s[i].from = chain_nr;
chain_nr = i;
/* this udc is accessible from the base chain for hooknr */
cl_s[i].hookmask |= (1 << hooknr);
continue;
}
letscontinue:
e = ebt_next_entry(e);
pos++;
}
return 0;
}
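/* Illustrative example of what check_chainloops() rejects: if base chain
* FORWARD jumps to udc "A" and "A" (directly or via another udc) jumps back
* to "A", then cl_s[A].cs.n is still non-zero when the second jump is seen
* and the function returns -1, so translate_table() refuses the table with
* -EINVAL.
*/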
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
static int translate_table(struct net *net, const char *name,
struct ebt_table_info *newinfo)
{
unsigned int i, j, k, udc_cnt;
int ret;
struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
i = 0;
while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
i++;
if (i == NF_BR_NUMHOOKS)
return -EINVAL;
if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)
return -EINVAL;
/* make sure the chains are ordered after each other, in the same order
* as their corresponding hooks
*/
for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
if (!newinfo->hook_entry[j])
continue;
if (newinfo->hook_entry[j] <= newinfo->hook_entry[i])
return -EINVAL;
i = j;
}
/* do some early checkings and initialize some things */
i = 0; /* holds the expected nr. of entries for the chain */
j = 0; /* holds the up to now counted entries for the chain */
k = 0; /* holds the total nr. of entries, should equal
* newinfo->nentries afterwards
*/
udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
ebt_check_entry_size_and_hooks, newinfo,
&i, &j, &k, &udc_cnt);
if (ret != 0)
return ret;
if (i != j)
return -EINVAL;
if (k != newinfo->nentries)
return -EINVAL;
/* get the location of the udc, put them in an array
* while we're at it, allocate the chainstack
*/
if (udc_cnt) {
/* this will get free'd in do_replace()/ebt_register_table()
* if an error occurs
*/
newinfo->chainstack =
vmalloc(array_size(nr_cpu_ids,
sizeof(*(newinfo->chainstack))));
if (!newinfo->chainstack)
return -ENOMEM;
for_each_possible_cpu(i) {
newinfo->chainstack[i] =
vmalloc_node(array_size(udc_cnt,
sizeof(*(newinfo->chainstack[0]))),
cpu_to_node(i));
if (!newinfo->chainstack[i]) {
while (i)
vfree(newinfo->chainstack[--i]);
vfree(newinfo->chainstack);
newinfo->chainstack = NULL;
return -ENOMEM;
}
}
cl_s = vmalloc(array_size(udc_cnt, sizeof(*cl_s)));
if (!cl_s)
return -ENOMEM;
i = 0; /* the i'th udc */
EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
ebt_get_udc_positions, newinfo, &i, cl_s);
/* sanity check */
if (i != udc_cnt) {
vfree(cl_s);
return -EFAULT;
}
}
/* Check for loops */
for (i = 0; i < NF_BR_NUMHOOKS; i++)
if (newinfo->hook_entry[i])
if (check_chainloops(newinfo->hook_entry[i],
cl_s, udc_cnt, i, newinfo->entries)) {
vfree(cl_s);
return -EINVAL;
}
/* we now know the following (along with E=mc²):
* - the nr of entries in each chain is right
* - the size of the allocated space is right
* - all valid hooks have a corresponding chain
* - there are no loops
* - wrong data can still be on the level of a single entry
* - could be there are jumps to places that are not the
* beginning of a chain. This can only occur in chains that
* are not accessible from any base chains, so we don't care.
*/
/* used to know what we need to clean up if something goes wrong */
i = 0;
ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
if (ret != 0) {
EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
ebt_cleanup_entry, net, &i);
}
vfree(cl_s);
return ret;
}
/* called under write_lock */
static void get_counters(const struct ebt_counter *oldcounters,
struct ebt_counter *counters, unsigned int nentries)
{
int i, cpu;
struct ebt_counter *counter_base;
/* counters of cpu 0 */
memcpy(counters, oldcounters,
sizeof(struct ebt_counter) * nentries);
/* add other counters to those of cpu 0 */
for_each_possible_cpu(cpu) {
if (cpu == 0)
continue;
counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
for (i = 0; i < nentries; i++)
ADD_COUNTER(counters[i], counter_base[i].bcnt,
counter_base[i].pcnt);
}
}
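/* Illustrative counter layout: every possible CPU owns a block of nentries
* counters, spaced by COUNTER_OFFSET() so the blocks stay cache friendly,
* e.g. for cpu N
*
*	counter_base = COUNTER_BASE(oldcounters, nentries, N);
*
* get_counters() simply folds all per-CPU blocks into the cpu 0 copy.
*/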
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
struct ebt_table_info *newinfo)
{
int ret;
struct ebt_counter *counterstmp = NULL;
/* used to be able to unlock earlier */
struct ebt_table_info *table;
struct ebt_table *t;
/* the user wants counters back
* the check on the size is done later, when we have the lock
*/
if (repl->num_counters) {
unsigned long size = repl->num_counters * sizeof(*counterstmp);
counterstmp = vmalloc(size);
if (!counterstmp)
return -ENOMEM;
}
newinfo->chainstack = NULL;
ret = ebt_verify_pointers(repl, newinfo);
if (ret != 0)
goto free_counterstmp;
ret = translate_table(net, repl->name, newinfo);
if (ret != 0)
goto free_counterstmp;
t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
if (!t) {
ret = -ENOENT;
goto free_iterate;
}
if (repl->valid_hooks != t->valid_hooks) {
ret = -EINVAL;
goto free_unlock;
}
if (repl->num_counters && repl->num_counters != t->private->nentries) {
ret = -EINVAL;
goto free_unlock;
}
/* we have the mutex lock, so no danger in reading this pointer */
table = t->private;
/* make sure the table can only be rmmod'ed if it contains no rules */
if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
ret = -ENOENT;
goto free_unlock;
} else if (table->nentries && !newinfo->nentries)
module_put(t->me);
/* we need an atomic snapshot of the counters */
write_lock_bh(&t->lock);
if (repl->num_counters)
get_counters(t->private->counters, counterstmp,
t->private->nentries);
t->private = newinfo;
write_unlock_bh(&t->lock);
mutex_unlock(&ebt_mutex);
/* so, a user can change the chains while having messed up her counter
* allocation. The only reason it is done this way is that the lock then
* needs to be taken only once, while this still doesn't bring the kernel
* into a dangerous state.
*/
if (repl->num_counters &&
copy_to_user(repl->counters, counterstmp,
array_size(repl->num_counters, sizeof(struct ebt_counter)))) {
/* Silent error, can't fail, new table is already in place */
net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
}
/* decrease module count and free resources */
EBT_ENTRY_ITERATE(table->entries, table->entries_size,
ebt_cleanup_entry, net, NULL);
vfree(table->entries);
ebt_free_table_info(table);
vfree(table);
vfree(counterstmp);
audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
AUDIT_XT_OP_REPLACE, GFP_KERNEL);
return 0;
free_unlock:
mutex_unlock(&ebt_mutex);
free_iterate:
EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
ebt_cleanup_entry, net, NULL);
free_counterstmp:
vfree(counterstmp);
/* can be initialized in translate_table() */
ebt_free_table_info(newinfo);
return ret;
}
/* replace the table */
static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
int ret, countersize;
struct ebt_table_info *newinfo;
struct ebt_replace tmp;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
if (len != sizeof(tmp) + tmp.entries_size)
return -EINVAL;
if (tmp.entries_size == 0)
return -EINVAL;
/* overflow check */
if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;
tmp.name[sizeof(tmp.name) - 1] = 0;
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT);
if (!newinfo)
return -ENOMEM;
if (countersize)
memset(newinfo->counters, 0, countersize);
newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT);
if (!newinfo->entries) {
ret = -ENOMEM;
goto free_newinfo;
}
if (copy_from_user(
newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
ret = -EFAULT;
goto free_entries;
}
ret = do_replace_finish(net, &tmp, newinfo);
if (ret == 0)
return ret;
free_entries:
vfree(newinfo->entries);
free_newinfo:
vfree(newinfo);
return ret;
}
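/* Rough userspace view of a table replace (illustrative sketch only):
*
*	struct ebt_replace repl = { .name = "filter", ... };
*	setsockopt(fd, SOL_IP, EBT_SO_SET_ENTRIES, &repl,
*		   sizeof(repl) + repl.entries_size);
*
* do_replace() copies the header and the entries blob and then hands both to
* do_replace_finish() for verification and the actual swap of t->private.
*/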
static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
{
mutex_lock(&ebt_mutex);
list_del(&table->list);
mutex_unlock(&ebt_mutex);
audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries,
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
ebt_cleanup_entry, net, NULL);
if (table->private->nentries)
module_put(table->me);
vfree(table->private->entries);
ebt_free_table_info(table->private);
vfree(table->private);
kfree(table->ops);
kfree(table);
}
int ebt_register_table(struct net *net, const struct ebt_table *input_table,
const struct nf_hook_ops *template_ops)
{
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
struct ebt_table_info *newinfo;
struct ebt_table *t, *table;
struct nf_hook_ops *ops;
unsigned int num_ops;
struct ebt_replace_kernel *repl;
int ret, i, countersize;
void *p;
if (input_table == NULL || (repl = input_table->table) == NULL ||
repl->entries == NULL || repl->entries_size == 0 ||
repl->counters != NULL || input_table->private != NULL)
return -EINVAL;
/* Don't add one table to multiple lists. */
table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
goto out;
}
countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
newinfo = vmalloc(sizeof(*newinfo) + countersize);
ret = -ENOMEM;
if (!newinfo)
goto free_table;
p = vmalloc(repl->entries_size);
if (!p)
goto free_newinfo;
memcpy(p, repl->entries, repl->entries_size);
newinfo->entries = p;
newinfo->entries_size = repl->entries_size;
newinfo->nentries = repl->nentries;
if (countersize)
memset(newinfo->counters, 0, countersize);
/* fill in newinfo and parse the entries */
newinfo->chainstack = NULL;
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
if ((repl->valid_hooks & (1 << i)) == 0)
newinfo->hook_entry[i] = NULL;
else
newinfo->hook_entry[i] = p +
((char *)repl->hook_entry[i] - repl->entries);
}
ret = translate_table(net, repl->name, newinfo);
if (ret != 0)
goto free_chainstack;
table->private = newinfo;
rwlock_init(&table->lock);
mutex_lock(&ebt_mutex);
list_for_each_entry(t, &ebt_net->tables, list) {
if (strcmp(t->name, table->name) == 0) {
ret = -EEXIST;
goto free_unlock;
}
}
/* Hold a reference count if the chains aren't empty */
if (newinfo->nentries && !try_module_get(table->me)) {
ret = -ENOENT;
goto free_unlock;
}
num_ops = hweight32(table->valid_hooks);
if (num_ops == 0) {
ret = -EINVAL;
goto free_unlock;
}
ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
if (newinfo->nentries)
module_put(table->me);
goto free_unlock;
}
for (i = 0; i < num_ops; i++)
ops[i].priv = table;
list_add(&table->list, &ebt_net->tables);
mutex_unlock(&ebt_mutex);
table->ops = ops;
ret = nf_register_net_hooks(net, ops, num_ops);
if (ret)
__ebt_unregister_table(net, table);
audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
AUDIT_XT_OP_REGISTER, GFP_KERNEL);
return ret;
free_unlock:
mutex_unlock(&ebt_mutex);
free_chainstack:
ebt_free_table_info(newinfo);
vfree(newinfo->entries);
free_newinfo:
vfree(newinfo);
free_table:
kfree(table);
out:
return ret;
}
int ebt_register_template(const struct ebt_table *t, int (*table_init)(struct net *net))
{
struct ebt_template *tmpl;
mutex_lock(&ebt_mutex);
list_for_each_entry(tmpl, &template_tables, list) {
if (WARN_ON_ONCE(strcmp(t->name, tmpl->name) == 0)) {
mutex_unlock(&ebt_mutex);
return -EEXIST;
}
}
tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
if (!tmpl) {
mutex_unlock(&ebt_mutex);
return -ENOMEM;
}
tmpl->table_init = table_init;
strscpy(tmpl->name, t->name, sizeof(tmpl->name));
tmpl->owner = t->me;
list_add(&tmpl->list, &template_tables);
mutex_unlock(&ebt_mutex);
return 0;
}
EXPORT_SYMBOL(ebt_register_template);
void ebt_unregister_template(const struct ebt_table *t)
{
struct ebt_template *tmpl;
mutex_lock(&ebt_mutex);
list_for_each_entry(tmpl, &template_tables, list) {
if (strcmp(t->name, tmpl->name))
continue;
list_del(&tmpl->list);
mutex_unlock(&ebt_mutex);
kfree(tmpl);
return;
}
mutex_unlock(&ebt_mutex);
WARN_ON_ONCE(1);
}
EXPORT_SYMBOL(ebt_unregister_template);
static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
{
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
struct ebt_table *t;
mutex_lock(&ebt_mutex);
list_for_each_entry(t, &ebt_net->tables, list) {
if (strcmp(t->name, name) == 0) {
mutex_unlock(&ebt_mutex);
return t;
}
}
mutex_unlock(&ebt_mutex);
return NULL;
}
void ebt_unregister_table_pre_exit(struct net *net, const char *name)
{
struct ebt_table *table = __ebt_find_table(net, name);
if (table)
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
}
EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
void ebt_unregister_table(struct net *net, const char *name)
{
struct ebt_table *table = __ebt_find_table(net, name);
if (table)
__ebt_unregister_table(net, table);
}
/* userspace just supplied us with counters */
static int do_update_counters(struct net *net, const char *name,
struct ebt_counter __user *counters,
unsigned int num_counters, unsigned int len)
{
int i, ret;
struct ebt_counter *tmp;
struct ebt_table *t;
if (num_counters == 0)
return -EINVAL;
tmp = vmalloc(array_size(num_counters, sizeof(*tmp)));
if (!tmp)
return -ENOMEM;
t = find_table_lock(net, name, &ret, &ebt_mutex);
if (!t)
goto free_tmp;
if (num_counters != t->private->nentries) {
ret = -EINVAL;
goto unlock_mutex;
}
if (copy_from_user(tmp, counters,
array_size(num_counters, sizeof(*counters)))) {
ret = -EFAULT;
goto unlock_mutex;
}
/* we want an atomic add of the counters */
write_lock_bh(&t->lock);
/* we add to the counters of the first cpu */
for (i = 0; i < num_counters; i++)
ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
write_unlock_bh(&t->lock);
ret = 0;
unlock_mutex:
mutex_unlock(&ebt_mutex);
free_tmp:
vfree(tmp);
return ret;
}
static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
{
struct ebt_replace hlp;
if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
return -EFAULT;
if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
return -EINVAL;
return do_update_counters(net, hlp.name, hlp.counters,
hlp.num_counters, len);
}
static inline int ebt_obj_to_user(char __user *um, const char *_name,
const char *data, int entrysize,
int usersize, int datasize, u8 revision)
{
char name[EBT_EXTENSION_MAXNAMELEN] = {0};
/* ebtables expects 31-byte-long names but xt_match names are only 29 bytes
* long. Copy 29 bytes and fill the remaining bytes with zeroes.
*/
strscpy(name, _name, sizeof(name));
if (copy_to_user(um, name, EBT_EXTENSION_MAXNAMELEN) ||
put_user(revision, (u8 __user *)(um + EBT_EXTENSION_MAXNAMELEN)) ||
put_user(datasize, (int __user *)(um + EBT_EXTENSION_MAXNAMELEN + 1)) ||
xt_data_to_user(um + entrysize, data, usersize, datasize,
XT_ALIGN(datasize)))
return -EFAULT;
return 0;
}
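/* Illustrative layout that ebt_obj_to_user() writes back to userspace,
* mirroring the front of struct ebt_entry_match/_watcher/_target:
*
*	name[EBT_EXTENSION_MAXNAMELEN]	(zero padded extension name)
*	u8  revision
*	int size			(datasize, as userspace expects it)
*
* with the extension data itself copied at offset entrysize, zero padded
* from usersize up to the XT_ALIGN()ed datasize.
*/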
static inline int ebt_match_to_user(const struct ebt_entry_match *m,
const char *base, char __user *ubase)
{
return ebt_obj_to_user(ubase + ((char *)m - base),
m->u.match->name, m->data, sizeof(*m),
m->u.match->usersize, m->match_size,
m->u.match->revision);
}
static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w,
const char *base, char __user *ubase)
{
return ebt_obj_to_user(ubase + ((char *)w - base),
w->u.watcher->name, w->data, sizeof(*w),
w->u.watcher->usersize, w->watcher_size,
w->u.watcher->revision);
}
static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base,
char __user *ubase)
{
int ret;
char __user *hlp;
const struct ebt_entry_target *t;
if (e->bitmask == 0) {
/* special case !EBT_ENTRY_OR_ENTRIES */
if (copy_to_user(ubase + ((char *)e - base), e,
sizeof(struct ebt_entries)))
return -EFAULT;
return 0;
}
if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e)))
return -EFAULT;
hlp = ubase + (((char *)e + e->target_offset) - base);
t = ebt_get_target_c(e);
ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase);
if (ret != 0)
return ret;
ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase);
if (ret != 0)
return ret;
ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t),
t->u.target->usersize, t->target_size,
t->u.target->revision);
if (ret != 0)
return ret;
return 0;
}
static int copy_counters_to_user(struct ebt_table *t,
const struct ebt_counter *oldcounters,
void __user *user, unsigned int num_counters,
unsigned int nentries)
{
struct ebt_counter *counterstmp;
int ret = 0;
/* userspace might not need the counters */
if (num_counters == 0)
return 0;
if (num_counters != nentries)
return -EINVAL;
counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
if (!counterstmp)
return -ENOMEM;
write_lock_bh(&t->lock);
get_counters(oldcounters, counterstmp, nentries);
write_unlock_bh(&t->lock);
if (copy_to_user(user, counterstmp,
array_size(nentries, sizeof(struct ebt_counter))))
ret = -EFAULT;
vfree(counterstmp);
return ret;
}
/* called with ebt_mutex locked */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
const int *len, int cmd)
{
struct ebt_replace tmp;
const struct ebt_counter *oldcounters;
unsigned int entries_size, nentries;
int ret;
char *entries;
if (cmd == EBT_SO_GET_ENTRIES) {
entries_size = t->private->entries_size;
nentries = t->private->nentries;
entries = t->private->entries;
oldcounters = t->private->counters;
} else {
entries_size = t->table->entries_size;
nentries = t->table->nentries;
entries = t->table->entries;
oldcounters = t->table->counters;
}
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
if (*len != sizeof(struct ebt_replace) + entries_size +
(tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
return -EINVAL;
if (tmp.nentries != nentries)
return -EINVAL;
if (tmp.entries_size != entries_size)
return -EINVAL;
ret = copy_counters_to_user(t, oldcounters, tmp.counters,
tmp.num_counters, nentries);
if (ret)
return ret;
/* set the match/watcher/target names right */
return EBT_ENTRY_ITERATE(entries, entries_size,
ebt_entry_to_user, entries, tmp.entries);
}
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* 32-bit userspace compatibility definitions. */
struct compat_ebt_replace {
char name[EBT_TABLE_MAXNAMELEN];
compat_uint_t valid_hooks;
compat_uint_t nentries;
compat_uint_t entries_size;
/* start of the chains */
compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
/* nr of counters userspace expects back */
compat_uint_t num_counters;
/* where the kernel will put the old counters. */
compat_uptr_t counters;
compat_uptr_t entries;
};
/* struct ebt_entry_match, _target and _watcher have the same layout */
struct compat_ebt_entry_mwt {
union {
struct {
char name[EBT_EXTENSION_MAXNAMELEN];
u8 revision;
};
compat_uptr_t ptr;
} u;
compat_uint_t match_size;
compat_uint_t data[] __aligned(__alignof__(struct compat_ebt_replace));
};
/* account for possible padding between match_size and ->data */
static int ebt_compat_entry_padsize(void)
{
BUILD_BUG_ON(sizeof(struct ebt_entry_match) <
sizeof(struct compat_ebt_entry_mwt));
return (int) sizeof(struct ebt_entry_match) -
sizeof(struct compat_ebt_entry_mwt);
}
static int ebt_compat_match_offset(const struct xt_match *match,
unsigned int userlen)
{
/* ebt_among needs special handling. The kernel .matchsize is
* set to -1 at registration time; at runtime an EBT_ALIGN()ed
* value is expected.
* Example: userspace sends 4500, ebt_among.c wants 4504.
*/
if (unlikely(match->matchsize == -1))
return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
return xt_compat_match_offset(match);
}
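/* Illustrative numbers for the ebt_among special case above: userspace sends
* match_size == 4500; XT_ALIGN(4500) is 4504 on a 64 bit kernel while
* COMPAT_XT_ALIGN(4500) stays 4500, so the returned offset is 4.
*/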
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
unsigned int *size)
{
const struct xt_match *match = m->u.match;
struct compat_ebt_entry_mwt __user *cm = *dstptr;
int off = ebt_compat_match_offset(match, m->match_size);
compat_uint_t msize = m->match_size - off;
if (WARN_ON(off >= m->match_size))
return -EINVAL;
if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) ||
put_user(match->revision, &cm->u.revision) ||
put_user(msize, &cm->match_size))
return -EFAULT;
if (match->compat_to_user) {
if (match->compat_to_user(cm->data, m->data))
return -EFAULT;
} else {
if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
COMPAT_XT_ALIGN(msize)))
return -EFAULT;
}
*size -= ebt_compat_entry_padsize() + off;
*dstptr = cm->data;
*dstptr += msize;
return 0;
}
static int compat_target_to_user(struct ebt_entry_target *t,
void __user **dstptr,
unsigned int *size)
{
const struct xt_target *target = t->u.target;
struct compat_ebt_entry_mwt __user *cm = *dstptr;
int off = xt_compat_target_offset(target);
compat_uint_t tsize = t->target_size - off;
if (WARN_ON(off >= t->target_size))
return -EINVAL;
if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) ||
put_user(target->revision, &cm->u.revision) ||
put_user(tsize, &cm->match_size))
return -EFAULT;
if (target->compat_to_user) {
if (target->compat_to_user(cm->data, t->data))
return -EFAULT;
} else {
if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
COMPAT_XT_ALIGN(tsize)))
return -EFAULT;
}
*size -= ebt_compat_entry_padsize() + off;
*dstptr = cm->data;
*dstptr += tsize;
return 0;
}
static int compat_watcher_to_user(struct ebt_entry_watcher *w,
void __user **dstptr,
unsigned int *size)
{
return compat_target_to_user((struct ebt_entry_target *)w,
dstptr, size);
}
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
unsigned int *size)
{
struct ebt_entry_target *t;
struct ebt_entry __user *ce;
u32 watchers_offset, target_offset, next_offset;
compat_uint_t origsize;
int ret;
if (e->bitmask == 0) {
if (*size < sizeof(struct ebt_entries))
return -EINVAL;
if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
return -EFAULT;
*dstptr += sizeof(struct ebt_entries);
*size -= sizeof(struct ebt_entries);
return 0;
}
if (*size < sizeof(*ce))
return -EINVAL;
ce = *dstptr;
if (copy_to_user(ce, e, sizeof(*ce)))
return -EFAULT;
origsize = *size;
*dstptr += sizeof(*ce);
ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
if (ret)
return ret;
watchers_offset = e->watchers_offset - (origsize - *size);
ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
if (ret)
return ret;
target_offset = e->target_offset - (origsize - *size);
t = ebt_get_target(e);
ret = compat_target_to_user(t, dstptr, size);
if (ret)
return ret;
next_offset = e->next_offset - (origsize - *size);
if (put_user(watchers_offset, &ce->watchers_offset) ||
put_user(target_offset, &ce->target_offset) ||
put_user(next_offset, &ce->next_offset))
return -EFAULT;
*size -= sizeof(*ce);
return 0;
}
static int compat_calc_match(struct ebt_entry_match *m, int *off)
{
*off += ebt_compat_match_offset(m->u.match, m->match_size);
*off += ebt_compat_entry_padsize();
return 0;
}
static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
{
*off += xt_compat_target_offset(w->u.watcher);
*off += ebt_compat_entry_padsize();
return 0;
}
static int compat_calc_entry(const struct ebt_entry *e,
const struct ebt_table_info *info,
const void *base,
struct compat_ebt_replace *newinfo)
{
const struct ebt_entry_target *t;
unsigned int entry_offset;
int off, ret, i;
if (e->bitmask == 0)
return 0;
off = 0;
entry_offset = (void *)e - base;
EBT_MATCH_ITERATE(e, compat_calc_match, &off);
EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
t = ebt_get_target_c(e);
off += xt_compat_target_offset(t->u.target);
off += ebt_compat_entry_padsize();
newinfo->entries_size -= off;
ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
if (ret)
return ret;
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
const void *hookptr = info->hook_entry[i];
if (info->hook_entry[i] &&
(e < (struct ebt_entry *)(base - hookptr))) {
newinfo->hook_entry[i] -= off;
pr_debug("0x%08X -> 0x%08X\n",
newinfo->hook_entry[i] + off,
newinfo->hook_entry[i]);
}
}
return 0;
}
static int ebt_compat_init_offsets(unsigned int number)
{
if (number > INT_MAX)
return -EINVAL;
/* also count the base chain policies */
number += NF_BR_NUMHOOKS;
return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
}
static int compat_table_info(const struct ebt_table_info *info,
struct compat_ebt_replace *newinfo)
{
unsigned int size = info->entries_size;
const void *entries = info->entries;
int ret;
newinfo->entries_size = size;
ret = ebt_compat_init_offsets(info->nentries);
if (ret)
return ret;
return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
entries, newinfo);
}
static int compat_copy_everything_to_user(struct ebt_table *t,
void __user *user, int *len, int cmd)
{
struct compat_ebt_replace repl, tmp;
struct ebt_counter *oldcounters;
struct ebt_table_info tinfo;
int ret;
void __user *pos;
memset(&tinfo, 0, sizeof(tinfo));
if (cmd == EBT_SO_GET_ENTRIES) {
tinfo.entries_size = t->private->entries_size;
tinfo.nentries = t->private->nentries;
tinfo.entries = t->private->entries;
oldcounters = t->private->counters;
} else {
tinfo.entries_size = t->table->entries_size;
tinfo.nentries = t->table->nentries;
tinfo.entries = t->table->entries;
oldcounters = t->table->counters;
}
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
if (tmp.nentries != tinfo.nentries ||
(tmp.num_counters && tmp.num_counters != tinfo.nentries))
return -EINVAL;
memcpy(&repl, &tmp, sizeof(repl));
if (cmd == EBT_SO_GET_ENTRIES)
ret = compat_table_info(t->private, &repl);
else
ret = compat_table_info(&tinfo, &repl);
if (ret)
return ret;
if (*len != sizeof(tmp) + repl.entries_size +
(tmp.num_counters ? tinfo.nentries * sizeof(struct ebt_counter) : 0)) {
pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
*len, tinfo.entries_size, repl.entries_size);
return -EINVAL;
}
/* userspace might not need the counters */
ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
tmp.num_counters, tinfo.nentries);
if (ret)
return ret;
pos = compat_ptr(tmp.entries);
return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
struct ebt_entries_buf_state {
char *buf_kern_start; /* kernel buffer to copy (translated) data to */
u32 buf_kern_len; /* total size of kernel buffer */
u32 buf_kern_offset; /* amount of data copied so far */
u32 buf_user_offset; /* read position in userspace buffer */
};
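/* The compat translation below runs twice over the same state machine
* (illustrative summary): a first pass with buf_kern_start == NULL that only
* measures how big the translated 64 bit blob will be, and a second pass that
* copies/translates into the vmalloc()ed buffer of exactly that size.
*/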
static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
{
state->buf_kern_offset += sz;
return state->buf_kern_offset >= sz ? 0 : -EINVAL;
}
static int ebt_buf_add(struct ebt_entries_buf_state *state,
const void *data, unsigned int sz)
{
if (state->buf_kern_start == NULL)
goto count_only;
if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
return -EINVAL;
memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
count_only:
state->buf_user_offset += sz;
return ebt_buf_count(state, sz);
}
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
{
char *b = state->buf_kern_start;
if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
return -EINVAL;
if (b != NULL && sz > 0)
memset(b + state->buf_kern_offset, 0, sz);
/* do not adjust ->buf_user_offset here, we added kernel-side padding */
return ebt_buf_count(state, sz);
}
enum compat_mwt {
EBT_COMPAT_MATCH,
EBT_COMPAT_WATCHER,
EBT_COMPAT_TARGET,
};
static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
enum compat_mwt compat_mwt,
struct ebt_entries_buf_state *state,
const unsigned char *base)
{
char name[EBT_EXTENSION_MAXNAMELEN];
struct xt_match *match;
struct xt_target *wt;
void *dst = NULL;
int off, pad = 0;
unsigned int size_kern, match_size = mwt->match_size;
if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
return -EINVAL;
if (state->buf_kern_start)
dst = state->buf_kern_start + state->buf_kern_offset;
switch (compat_mwt) {
case EBT_COMPAT_MATCH:
match = xt_request_find_match(NFPROTO_BRIDGE, name,
mwt->u.revision);
if (IS_ERR(match))
return PTR_ERR(match);
off = ebt_compat_match_offset(match, match_size);
if (dst) {
if (match->compat_from_user)
match->compat_from_user(dst, mwt->data);
else
memcpy(dst, mwt->data, match_size);
}
size_kern = match->matchsize;
if (unlikely(size_kern == -1))
size_kern = match_size;
module_put(match->me);
break;
case EBT_COMPAT_WATCHER:
case EBT_COMPAT_TARGET:
wt = xt_request_find_target(NFPROTO_BRIDGE, name,
mwt->u.revision);
if (IS_ERR(wt))
return PTR_ERR(wt);
off = xt_compat_target_offset(wt);
if (dst) {
if (wt->compat_from_user)
wt->compat_from_user(dst, mwt->data);
else
memcpy(dst, mwt->data, match_size);
}
size_kern = wt->targetsize;
module_put(wt->me);
break;
default:
return -EINVAL;
}
state->buf_kern_offset += match_size + off;
state->buf_user_offset += match_size;
pad = XT_ALIGN(size_kern) - size_kern;
if (pad > 0 && dst) {
if (WARN_ON(state->buf_kern_len <= pad))
return -EINVAL;
if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
return -EINVAL;
memset(dst + size_kern, 0, pad);
}
return off + match_size;
}
/* return the size of all matches, watchers or the target, including the
* necessary alignment and padding.
*/
static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
unsigned int size_left, enum compat_mwt type,
struct ebt_entries_buf_state *state, const void *base)
{
const char *buf = (const char *)match32;
int growth = 0;
if (size_left == 0)
return 0;
do {
struct ebt_entry_match *match_kern;
int ret;
if (size_left < sizeof(*match32))
return -EINVAL;
match_kern = (struct ebt_entry_match *) state->buf_kern_start;
if (match_kern) {
char *tmp;
tmp = state->buf_kern_start + state->buf_kern_offset;
match_kern = (struct ebt_entry_match *) tmp;
}
ret = ebt_buf_add(state, buf, sizeof(*match32));
if (ret < 0)
return ret;
size_left -= sizeof(*match32);
/* add padding before match->data (if any) */
ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
if (ret < 0)
return ret;
if (match32->match_size > size_left)
return -EINVAL;
size_left -= match32->match_size;
ret = compat_mtw_from_user(match32, type, state, base);
if (ret < 0)
return ret;
if (WARN_ON(ret < match32->match_size))
return -EINVAL;
growth += ret - match32->match_size;
growth += ebt_compat_entry_padsize();
buf += sizeof(*match32);
buf += match32->match_size;
if (match_kern)
match_kern->match_size = ret;
match32 = (struct compat_ebt_entry_mwt *) buf;
} while (size_left);
return growth;
}
/* called for all ebt_entry structures. */
static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
unsigned int *total,
struct ebt_entries_buf_state *state)
{
unsigned int i, j, startoff, next_expected_off, new_offset = 0;
/* stores match/watchers/targets & offset of next struct ebt_entry: */
unsigned int offsets[4];
unsigned int *offsets_update = NULL;
int ret;
char *buf_start;
if (*total < sizeof(struct ebt_entries))
return -EINVAL;
if (!entry->bitmask) {
*total -= sizeof(struct ebt_entries);
return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
}
if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
return -EINVAL;
startoff = state->buf_user_offset;
/* pull in most of the ebt_entry; that part does not need to be changed. */
ret = ebt_buf_add(state, entry,
offsetof(struct ebt_entry, watchers_offset));
if (ret < 0)
return ret;
offsets[0] = sizeof(struct ebt_entry); /* matches come first */
memcpy(&offsets[1], &entry->offsets, sizeof(entry->offsets));
if (state->buf_kern_start) {
buf_start = state->buf_kern_start + state->buf_kern_offset;
offsets_update = (unsigned int *) buf_start;
}
ret = ebt_buf_add(state, &offsets[1],
sizeof(offsets) - sizeof(offsets[0]));
if (ret < 0)
return ret;
buf_start = (char *) entry;
/* 0: matches offset, always follows ebt_entry.
* 1: watchers offset, from ebt_entry structure
* 2: target offset, from ebt_entry structure
* 3: next ebt_entry offset, from ebt_entry structure
*
* offsets are relative to beginning of struct ebt_entry (i.e., 0).
*/
for (i = 0; i < 4 ; ++i) {
if (offsets[i] > *total)
return -EINVAL;
if (i < 3 && offsets[i] == *total)
return -EINVAL;
if (i == 0)
continue;
if (offsets[i-1] > offsets[i])
return -EINVAL;
}
for (i = 0, j = 1 ; j < 4 ; j++, i++) {
struct compat_ebt_entry_mwt *match32;
unsigned int size;
char *buf = buf_start + offsets[i];
if (offsets[i] > offsets[j])
return -EINVAL;
match32 = (struct compat_ebt_entry_mwt *) buf;
size = offsets[j] - offsets[i];
ret = ebt_size_mwt(match32, size, i, state, base);
if (ret < 0)
return ret;
new_offset += ret;
if (offsets_update && new_offset) {
pr_debug("change offset %d to %d\n",
offsets_update[i], offsets[j] + new_offset);
offsets_update[i] = offsets[j] + new_offset;
}
}
if (state->buf_kern_start == NULL) {
unsigned int offset = buf_start - (char *) base;
ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
if (ret < 0)
return ret;
}
next_expected_off = state->buf_user_offset - startoff;
if (next_expected_off != entry->next_offset)
return -EINVAL;
if (*total < entry->next_offset)
return -EINVAL;
*total -= entry->next_offset;
return 0;
}
/* repl->entries_size is the size of the ebt_entry blob in userspace.
* It might need more memory when copied to a 64 bit kernel in case
* userspace is 32-bit. So, first task: find out how much memory is needed.
*
* Called before validation is performed.
*/
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
struct ebt_entries_buf_state *state)
{
unsigned int size_remaining = size_user;
int ret;
ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
&size_remaining, state);
if (ret < 0)
return ret;
if (size_remaining)
return -EINVAL;
return state->buf_kern_offset;
}
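/* Illustrative growth example: every 32 bit match/watcher/target header is
* smaller than its 64 bit struct ebt_entry_match counterpart, so each one
* grows by ebt_compat_entry_padsize() plus the per-extension compat offset;
* compat_copy_entries() returns the resulting total, which compat_do_replace()
* then vmalloc()s for the second pass.
*/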
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
sockptr_t arg, unsigned int len)
{
struct compat_ebt_replace tmp;
int i;
if (len < sizeof(tmp))
return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)))
return -EFAULT;
if (len != sizeof(tmp) + tmp.entries_size)
return -EINVAL;
if (tmp.entries_size == 0)
return -EINVAL;
if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;
memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
/* starting with hook_entry, 32 vs. 64 bit structures are different */
for (i = 0; i < NF_BR_NUMHOOKS; i++)
repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
repl->num_counters = tmp.num_counters;
repl->counters = compat_ptr(tmp.counters);
repl->entries = compat_ptr(tmp.entries);
return 0;
}
static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
int ret, i, countersize, size64;
struct ebt_table_info *newinfo;
struct ebt_replace tmp;
struct ebt_entries_buf_state state;
void *entries_tmp;
ret = compat_copy_ebt_replace_from_user(&tmp, arg, len);
if (ret) {
/* try real handler in case userland supplied needed padding */
if (ret == -EINVAL && do_replace(net, arg, len) == 0)
ret = 0;
return ret;
}
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
newinfo = vmalloc(sizeof(*newinfo) + countersize);
if (!newinfo)
return -ENOMEM;
if (countersize)
memset(newinfo->counters, 0, countersize);
memset(&state, 0, sizeof(state));
newinfo->entries = vmalloc(tmp.entries_size);
if (!newinfo->entries) {
ret = -ENOMEM;
goto free_newinfo;
}
if (copy_from_user(
newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
ret = -EFAULT;
goto free_entries;
}
entries_tmp = newinfo->entries;
xt_compat_lock(NFPROTO_BRIDGE);
ret = ebt_compat_init_offsets(tmp.nentries);
if (ret < 0)
goto out_unlock;
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
if (ret < 0)
goto out_unlock;
pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
size64 = ret;
newinfo->entries = vmalloc(size64);
if (!newinfo->entries) {
vfree(entries_tmp);
ret = -ENOMEM;
goto out_unlock;
}
memset(&state, 0, sizeof(state));
state.buf_kern_start = newinfo->entries;
state.buf_kern_len = size64;
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
if (WARN_ON(ret < 0)) {
vfree(entries_tmp);
goto out_unlock;
}
vfree(entries_tmp);
tmp.entries_size = size64;
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
char __user *usrptr;
if (tmp.hook_entry[i]) {
unsigned int delta;
usrptr = (char __user *) tmp.hook_entry[i];
delta = usrptr - tmp.entries;
usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
}
}
xt_compat_flush_offsets(NFPROTO_BRIDGE);
xt_compat_unlock(NFPROTO_BRIDGE);
ret = do_replace_finish(net, &tmp, newinfo);
if (ret == 0)
return ret;
free_entries:
vfree(newinfo->entries);
free_newinfo:
vfree(newinfo);
return ret;
out_unlock:
xt_compat_flush_offsets(NFPROTO_BRIDGE);
xt_compat_unlock(NFPROTO_BRIDGE);
goto free_entries;
}
static int compat_update_counters(struct net *net, sockptr_t arg,
unsigned int len)
{
struct compat_ebt_replace hlp;
if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
return -EFAULT;
/* try real handler in case userland supplied needed padding */
if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
return update_counters(net, arg, len);
return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
hlp.num_counters, len);
}
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
void __user *user, int *len)
{
int ret;
struct compat_ebt_replace tmp;
struct ebt_table *t;
struct net *net = sock_net(sk);
if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) &&
*len != sizeof(struct compat_ebt_replace))
return -EINVAL;
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
tmp.name[sizeof(tmp.name) - 1] = '\0';
t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
if (!t)
return ret;
xt_compat_lock(NFPROTO_BRIDGE);
switch (cmd) {
case EBT_SO_GET_INFO:
tmp.nentries = t->private->nentries;
ret = compat_table_info(t->private, &tmp);
if (ret)
goto out;
tmp.valid_hooks = t->valid_hooks;
if (copy_to_user(user, &tmp, *len) != 0) {
ret = -EFAULT;
break;
}
ret = 0;
break;
case EBT_SO_GET_INIT_INFO:
tmp.nentries = t->table->nentries;
tmp.entries_size = t->table->entries_size;
tmp.valid_hooks = t->table->valid_hooks;
if (copy_to_user(user, &tmp, *len) != 0) {
ret = -EFAULT;
break;
}
ret = 0;
break;
case EBT_SO_GET_ENTRIES:
case EBT_SO_GET_INIT_ENTRIES:
/* try real handler first in case of userland-side padding.
* in case we are dealing with an 'ordinary' 32 bit binary
* without 64bit compatibility padding, this will fail right
* after copy_from_user when the *len argument is validated.
*
* the compat_ variant needs to do one pass over the kernel
* data set to adjust for size differences before it does the check.
*/
if (copy_everything_to_user(t, user, len, cmd) == 0)
ret = 0;
else
ret = compat_copy_everything_to_user(t, user, len, cmd);
break;
default:
ret = -EINVAL;
}
out:
xt_compat_flush_offsets(NFPROTO_BRIDGE);
xt_compat_unlock(NFPROTO_BRIDGE);
mutex_unlock(&ebt_mutex);
return ret;
}
#endif
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct net *net = sock_net(sk);
struct ebt_replace tmp;
struct ebt_table *t;
int ret;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* try real handler in case userland supplied needed padding */
if (in_compat_syscall() &&
((cmd != EBT_SO_GET_INFO && cmd != EBT_SO_GET_INIT_INFO) ||
*len != sizeof(tmp)))
return compat_do_ebt_get_ctl(sk, cmd, user, len);
#endif
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
tmp.name[sizeof(tmp.name) - 1] = '\0';
t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
if (!t)
return ret;
switch (cmd) {
case EBT_SO_GET_INFO:
case EBT_SO_GET_INIT_INFO:
if (*len != sizeof(struct ebt_replace)) {
ret = -EINVAL;
mutex_unlock(&ebt_mutex);
break;
}
if (cmd == EBT_SO_GET_INFO) {
tmp.nentries = t->private->nentries;
tmp.entries_size = t->private->entries_size;
tmp.valid_hooks = t->valid_hooks;
} else {
tmp.nentries = t->table->nentries;
tmp.entries_size = t->table->entries_size;
tmp.valid_hooks = t->table->valid_hooks;
}
mutex_unlock(&ebt_mutex);
if (copy_to_user(user, &tmp, *len) != 0) {
ret = -EFAULT;
break;
}
ret = 0;
break;
case EBT_SO_GET_ENTRIES:
case EBT_SO_GET_INIT_ENTRIES:
ret = copy_everything_to_user(t, user, len, cmd);
mutex_unlock(&ebt_mutex);
break;
default:
mutex_unlock(&ebt_mutex);
ret = -EINVAL;
}
return ret;
}
static int do_ebt_set_ctl(struct sock *sk, int cmd, sockptr_t arg,
unsigned int len)
{
struct net *net = sock_net(sk);
int ret;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case EBT_SO_SET_ENTRIES:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
ret = compat_do_replace(net, arg, len);
else
#endif
ret = do_replace(net, arg, len);
break;
case EBT_SO_SET_COUNTERS:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
if (in_compat_syscall())
ret = compat_update_counters(net, arg, len);
else
#endif
ret = update_counters(net, arg, len);
break;
default:
ret = -EINVAL;
}
return ret;
}
static struct nf_sockopt_ops ebt_sockopts = {
.pf = PF_INET,
.set_optmin = EBT_BASE_CTL,
.set_optmax = EBT_SO_SET_MAX + 1,
.set = do_ebt_set_ctl,
.get_optmin = EBT_BASE_CTL,
.get_optmax = EBT_SO_GET_MAX + 1,
.get = do_ebt_get_ctl,
.owner = THIS_MODULE,
};
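/* Illustrative userspace entry points for the sockopt range registered above:
* the ebtables binary opens an AF_INET socket and issues e.g.
*
*	getsockopt(fd, SOL_IP, EBT_SO_GET_INFO, &repl, &len);
*	setsockopt(fd, SOL_IP, EBT_SO_SET_COUNTERS, &repl, len);
*
* which end up in do_ebt_get_ctl() and do_ebt_set_ctl() respectively.
*/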
static int __net_init ebt_pernet_init(struct net *net)
{
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
INIT_LIST_HEAD(&ebt_net->tables);
return 0;
}
static struct pernet_operations ebt_net_ops = {
.init = ebt_pernet_init,
.id = &ebt_pernet_id,
.size = sizeof(struct ebt_pernet),
};
static int __init ebtables_init(void)
{
int ret;
ret = xt_register_target(&ebt_standard_target);
if (ret < 0)
return ret;
ret = nf_register_sockopt(&ebt_sockopts);
if (ret < 0) {
xt_unregister_target(&ebt_standard_target);
return ret;
}
ret = register_pernet_subsys(&ebt_net_ops);
if (ret < 0) {
nf_unregister_sockopt(&ebt_sockopts);
xt_unregister_target(&ebt_standard_target);
return ret;
}
return 0;
}
static void ebtables_fini(void)
{
nf_unregister_sockopt(&ebt_sockopts);
xt_unregister_target(&ebt_standard_target);
unregister_pernet_subsys(&ebt_net_ops);
}
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");
| linux-master | net/bridge/netfilter/ebtables.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#ifndef __CHECKER__
#include <net/cfg802154.h>
#include "driver-ops.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif
| linux-master | net/mac802154/trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2007-2012 Siemens AG
*
* Written by:
* Dmitry Eremin-Solenikov <[email protected]>
* Sergey Lapin <[email protected]>
* Maxim Gorbachyov <[email protected]>
* Alexander Smirnov <[email protected]>
*/
#include <linux/if_arp.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
#include <net/cfg802154.h>
#include "ieee802154_i.h"
#include "driver-ops.h"
void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_local *local = sdata->local;
int res;
ASSERT_RTNL();
BUG_ON(dev->type != ARPHRD_IEEE802154);
res = drv_set_channel(local, page, chan);
if (res) {
pr_debug("set_channel failed\n");
} else {
local->phy->current_channel = chan;
local->phy->current_page = page;
}
}
int mac802154_get_params(struct net_device *dev,
struct ieee802154_llsec_params *params)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_get_params(&sdata->sec, params);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_set_params(struct net_device *dev,
const struct ieee802154_llsec_params *params,
int changed)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_set_params(&sdata->sec, params, changed);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_add_key(struct net_device *dev,
const struct ieee802154_llsec_key_id *id,
const struct ieee802154_llsec_key *key)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_key_add(&sdata->sec, id, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_del_key(struct net_device *dev,
const struct ieee802154_llsec_key_id *id)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_key_del(&sdata->sec, id);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_add_dev(struct net_device *dev,
const struct ieee802154_llsec_device *llsec_dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_dev_add(&sdata->sec, llsec_dev);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_del_dev(struct net_device *dev, __le64 dev_addr)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_dev_del(&sdata->sec, dev_addr);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_add_devkey(struct net_device *dev,
__le64 device_addr,
const struct ieee802154_llsec_device_key *key)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_devkey_add(&sdata->sec, device_addr, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_del_devkey(struct net_device *dev,
__le64 device_addr,
const struct ieee802154_llsec_device_key *key)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_devkey_del(&sdata->sec, device_addr, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_add_seclevel(struct net_device *dev,
const struct ieee802154_llsec_seclevel *sl)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_seclevel_add(&sdata->sec, sl);
mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_del_seclevel(struct net_device *dev,
const struct ieee802154_llsec_seclevel *sl)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_seclevel_del(&sdata->sec, sl);
mutex_unlock(&sdata->sec_mtx);
return res;
}
void mac802154_lock_table(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_lock(&sdata->sec_mtx);
}
void mac802154_get_table(struct net_device *dev,
struct ieee802154_llsec_table **t)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
*t = &sdata->sec.table;
}
void mac802154_unlock_table(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
mutex_unlock(&sdata->sec_mtx);
}
| linux-master | net/mac802154/mib.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2007-2012 Siemens AG
*
* Written by:
* Dmitry Eremin-Solenikov <[email protected]>
* Sergey Lapin <[email protected]>
* Maxim Gorbachyov <[email protected]>
* Alexander Smirnov <[email protected]>
*/
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ieee802154.h>
#include <net/nl802154.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
#include <net/cfg802154.h>
#include "ieee802154_i.h"
#include "driver-ops.h"
int mac802154_wpan_update_llsec(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
int rc = 0;
if (ops->llsec) {
struct ieee802154_llsec_params params;
int changed = 0;
params.pan_id = wpan_dev->pan_id;
changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
params.hwaddr = wpan_dev->extended_addr;
changed |= IEEE802154_LLSEC_PARAM_HWADDR;
rc = ops->llsec->set_params(dev, &params, changed);
}
return rc;
}
static int
mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct sockaddr_ieee802154 *sa =
(struct sockaddr_ieee802154 *)&ifr->ifr_addr;
int err = -ENOIOCTLCMD;
if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR)
return err;
rtnl_lock();
switch (cmd) {
case SIOCGIFADDR:
{
u16 pan_id, short_addr;
pan_id = le16_to_cpu(wpan_dev->pan_id);
short_addr = le16_to_cpu(wpan_dev->short_addr);
if (pan_id == IEEE802154_PANID_BROADCAST ||
short_addr == IEEE802154_ADDR_BROADCAST) {
err = -EADDRNOTAVAIL;
break;
}
sa->family = AF_IEEE802154;
sa->addr.addr_type = IEEE802154_ADDR_SHORT;
sa->addr.pan_id = pan_id;
sa->addr.short_addr = short_addr;
err = 0;
break;
}
case SIOCSIFADDR:
if (netif_running(dev)) {
rtnl_unlock();
return -EBUSY;
}
dev_warn(&dev->dev,
"Using DEBUGing ioctl SIOCSIFADDR isn't recommended!\n");
if (sa->family != AF_IEEE802154 ||
sa->addr.addr_type != IEEE802154_ADDR_SHORT ||
sa->addr.pan_id == IEEE802154_PANID_BROADCAST ||
sa->addr.short_addr == IEEE802154_ADDR_BROADCAST ||
sa->addr.short_addr == IEEE802154_ADDR_UNDEF) {
err = -EINVAL;
break;
}
wpan_dev->pan_id = cpu_to_le16(sa->addr.pan_id);
wpan_dev->short_addr = cpu_to_le16(sa->addr.short_addr);
err = mac802154_wpan_update_llsec(dev);
break;
}
rtnl_unlock();
return err;
}
static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct sockaddr *addr = p;
__le64 extended_addr;
if (netif_running(dev))
return -EBUSY;
/* lowpan needs to be down in order to update the
 * SLAAC address after ifup
 */
if (sdata->wpan_dev.lowpan_dev) {
if (netif_running(sdata->wpan_dev.lowpan_dev))
return -EBUSY;
}
ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
return -EINVAL;
dev_addr_set(dev, addr->sa_data);
sdata->wpan_dev.extended_addr = extended_addr;
/* update lowpan interface mac address when
* wpan mac has been changed
*/
if (sdata->wpan_dev.lowpan_dev)
dev_addr_set(sdata->wpan_dev.lowpan_dev, dev->dev_addr);
return mac802154_wpan_update_llsec(dev);
}
static int ieee802154_setup_hw(struct ieee802154_sub_if_data *sdata)
{
struct ieee802154_local *local = sdata->local;
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
int ret;
sdata->required_filtering = sdata->iface_default_filtering;
if (local->hw.flags & IEEE802154_HW_AFILT) {
local->addr_filt.pan_id = wpan_dev->pan_id;
local->addr_filt.ieee_addr = wpan_dev->extended_addr;
local->addr_filt.short_addr = wpan_dev->short_addr;
}
if (local->hw.flags & IEEE802154_HW_LBT) {
ret = drv_set_lbt_mode(local, wpan_dev->lbt);
if (ret < 0)
return ret;
}
if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
ret = drv_set_csma_params(local, wpan_dev->min_be,
wpan_dev->max_be,
wpan_dev->csma_retries);
if (ret < 0)
return ret;
}
if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
ret = drv_set_max_frame_retries(local, wpan_dev->frame_retries);
if (ret < 0)
return ret;
}
return 0;
}
static int mac802154_slave_open(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_local *local = sdata->local;
int res;
ASSERT_RTNL();
set_bit(SDATA_STATE_RUNNING, &sdata->state);
if (!local->open_count) {
res = ieee802154_setup_hw(sdata);
if (res)
goto err;
res = drv_start(local, sdata->required_filtering,
&local->addr_filt);
if (res)
goto err;
}
local->open_count++;
netif_start_queue(dev);
return 0;
err:
/* might already be clear but that doesn't matter */
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
return res;
}
static int
ieee802154_check_mac_settings(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct ieee802154_sub_if_data *nsdata)
{
struct wpan_dev *nwpan_dev = &nsdata->wpan_dev;
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
ASSERT_RTNL();
if (sdata->iface_default_filtering != nsdata->iface_default_filtering)
return -EBUSY;
if (local->hw.flags & IEEE802154_HW_AFILT) {
if (wpan_dev->pan_id != nwpan_dev->pan_id ||
wpan_dev->short_addr != nwpan_dev->short_addr ||
wpan_dev->extended_addr != nwpan_dev->extended_addr)
return -EBUSY;
}
if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
if (wpan_dev->min_be != nwpan_dev->min_be ||
wpan_dev->max_be != nwpan_dev->max_be ||
wpan_dev->csma_retries != nwpan_dev->csma_retries)
return -EBUSY;
}
if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
if (wpan_dev->frame_retries != nwpan_dev->frame_retries)
return -EBUSY;
}
if (local->hw.flags & IEEE802154_HW_LBT) {
if (wpan_dev->lbt != nwpan_dev->lbt)
return -EBUSY;
}
return 0;
}
static int
ieee802154_check_concurrent_iface(struct ieee802154_sub_if_data *sdata,
enum nl802154_iftype iftype)
{
struct ieee802154_local *local = sdata->local;
struct ieee802154_sub_if_data *nsdata;
/* we hold the RTNL here so we can safely walk the list */
list_for_each_entry(nsdata, &local->interfaces, list) {
if (nsdata != sdata && ieee802154_sdata_running(nsdata)) {
int ret;
/* TODO: currently we don't support multiple node/coord
 * types; for that we would need to run skb_clone in the rx
 * path. Check whether there really is a use case that needs
 * multiple node/coord types at the same time.
 */
if (sdata->wpan_dev.iftype != NL802154_IFTYPE_MONITOR &&
nsdata->wpan_dev.iftype != NL802154_IFTYPE_MONITOR)
return -EBUSY;
/* check that all phy mac sublayer settings are the same.
 * We have only one phy; differing values cause trouble.
 */
ret = ieee802154_check_mac_settings(local, sdata, nsdata);
if (ret < 0)
return ret;
}
}
return 0;
}
static int mac802154_wpan_open(struct net_device *dev)
{
int rc;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
rc = ieee802154_check_concurrent_iface(sdata, wpan_dev->iftype);
if (rc < 0)
return rc;
return mac802154_slave_open(dev);
}
static int mac802154_slave_close(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_local *local = sdata->local;
ASSERT_RTNL();
if (mac802154_is_scanning(local))
mac802154_abort_scan_locked(local, sdata);
if (mac802154_is_beaconing(local))
mac802154_stop_beacons_locked(local, sdata);
netif_stop_queue(dev);
local->open_count--;
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
if (!local->open_count)
ieee802154_stop_device(local);
return 0;
}
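/* Fill in the security fields of an outgoing MAC header based on the
 * interface's llsec parameters and the per-frame overrides carried in
 * the mac_cb. Returns 0 when no security is applied or on success,
 * -EINVAL on conflicting settings.
 */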
static int mac802154_set_header_security(struct ieee802154_sub_if_data *sdata,
struct ieee802154_hdr *hdr,
const struct ieee802154_mac_cb *cb)
{
struct ieee802154_llsec_params params;
u8 level;
mac802154_llsec_get_params(&sdata->sec, &params);
if (!params.enabled && cb->secen_override && cb->secen)
return -EINVAL;
if (!params.enabled ||
(cb->secen_override && !cb->secen) ||
!params.out_level)
return 0;
if (cb->seclevel_override && !cb->seclevel)
return -EINVAL;
level = cb->seclevel_override ? cb->seclevel : params.out_level;
hdr->fc.security_enabled = 1;
hdr->sec.level = level;
hdr->sec.key_id_mode = params.out_key.mode;
if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX)
hdr->sec.short_src = params.out_key.short_source;
else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX)
hdr->sec.extended_src = params.out_key.extended_source;
hdr->sec.key_id = params.out_key.id;
return 0;
}
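/* wpan_dev header_ops create callback: build a full 802.15.4 MAC
 * header from explicit ieee802154_addr source/destination addresses,
 * falling back to the interface's own short or extended address when
 * no source address is given.
 */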
static int ieee802154_header_create(struct sk_buff *skb,
struct net_device *dev,
const struct ieee802154_addr *daddr,
const struct ieee802154_addr *saddr,
unsigned len)
{
struct ieee802154_hdr hdr;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct ieee802154_mac_cb *cb = mac_cb(skb);
int hlen;
if (!daddr)
return -EINVAL;
memset(&hdr.fc, 0, sizeof(hdr.fc));
hdr.fc.type = cb->type;
hdr.fc.security_enabled = cb->secen;
hdr.fc.ack_request = cb->ackreq;
hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
return -EINVAL;
if (!saddr) {
if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
hdr.source.mode = IEEE802154_ADDR_LONG;
hdr.source.extended_addr = wpan_dev->extended_addr;
} else {
hdr.source.mode = IEEE802154_ADDR_SHORT;
hdr.source.short_addr = wpan_dev->short_addr;
}
hdr.source.pan_id = wpan_dev->pan_id;
} else {
hdr.source = *(const struct ieee802154_addr *)saddr;
}
hdr.dest = *(const struct ieee802154_addr *)daddr;
hlen = ieee802154_hdr_push(skb, &hdr);
if (hlen < 0)
return -EINVAL;
skb_reset_mac_header(skb);
skb->mac_len = hlen;
if (len > ieee802154_max_payload(&hdr))
return -EMSGSIZE;
return hlen;
}
static const struct wpan_dev_header_ops ieee802154_header_ops = {
.create = ieee802154_header_create,
};
/* This header create functionality assumes at most an 8 byte array
 * for the source and destination pointers. To adapt this to the
 * 802.15.4 data frame header we use extended addressing only and
 * assume an intra-PAN connection; the fc fields are mostly fallback
 * handling. This provides dev_hard_header for dgram sockets.
 */
static int mac802154_header_create(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
const void *daddr,
const void *saddr,
unsigned len)
{
struct ieee802154_hdr hdr;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct ieee802154_mac_cb cb = { };
int hlen;
if (!daddr)
return -EINVAL;
memset(&hdr.fc, 0, sizeof(hdr.fc));
hdr.fc.type = IEEE802154_FC_TYPE_DATA;
hdr.fc.ack_request = wpan_dev->ackreq;
hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
/* TODO: currently a workaround that passes a zeroed cb block so that
 * the security parameters default according to the MIB.
 */
if (mac802154_set_header_security(sdata, &hdr, &cb) < 0)
return -EINVAL;
hdr.dest.pan_id = wpan_dev->pan_id;
hdr.dest.mode = IEEE802154_ADDR_LONG;
ieee802154_be64_to_le64(&hdr.dest.extended_addr, daddr);
hdr.source.pan_id = hdr.dest.pan_id;
hdr.source.mode = IEEE802154_ADDR_LONG;
if (!saddr)
hdr.source.extended_addr = wpan_dev->extended_addr;
else
ieee802154_be64_to_le64(&hdr.source.extended_addr, saddr);
hlen = ieee802154_hdr_push(skb, &hdr);
if (hlen < 0)
return -EINVAL;
skb_reset_mac_header(skb);
skb->mac_len = hlen;
if (len > ieee802154_max_payload(&hdr))
return -EMSGSIZE;
return hlen;
}
static int
mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
struct ieee802154_hdr hdr;
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
pr_debug("malformed packet\n");
return 0;
}
if (hdr.source.mode == IEEE802154_ADDR_LONG) {
ieee802154_le64_to_be64(haddr, &hdr.source.extended_addr);
return IEEE802154_EXTENDED_ADDR_LEN;
}
return 0;
}
static const struct header_ops mac802154_header_ops = {
.create = mac802154_header_create,
.parse = mac802154_header_parse,
};
static const struct net_device_ops mac802154_wpan_ops = {
.ndo_open = mac802154_wpan_open,
.ndo_stop = mac802154_slave_close,
.ndo_start_xmit = ieee802154_subif_start_xmit,
.ndo_do_ioctl = mac802154_wpan_ioctl,
.ndo_set_mac_address = mac802154_wpan_mac_addr,
};
static const struct net_device_ops mac802154_monitor_ops = {
.ndo_open = mac802154_wpan_open,
.ndo_stop = mac802154_slave_close,
.ndo_start_xmit = ieee802154_monitor_start_xmit,
};
static void mac802154_wpan_free(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
mac802154_llsec_destroy(&sdata->sec);
}
static void ieee802154_if_setup(struct net_device *dev)
{
dev->addr_len = IEEE802154_EXTENDED_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN);
/* Let hard_header_len be IEEE802154_MIN_HEADER_LEN. AF_PACKET
 * will not send frames without any payload, but ack frames
 * have no payload, so subtract one so that we can send a 3 byte
 * frame. The xmit callback assumes at least a hard header where the
 * two byte fc and the sequence field are set.
 */
dev->hard_header_len = IEEE802154_MIN_HEADER_LEN - 1;
/* The auth_tag header is for security and is placed in the private
 * payload room of the mac frame, which sits between the payload and
 * the FCS field.
 */
dev->needed_tailroom = IEEE802154_MAX_AUTH_TAG_LEN +
IEEE802154_FCS_LEN;
/* The mtu size is the payload without the mac header in this case.
 * We have a dynamic length header with a minimum header length
 * which is hard_header_len. Here we set the mtu to the maximum
 * payload size, which is IEEE802154_MTU - IEEE802154_FCS_LEN -
 * hard_header_len: the FCS is set by hardware or in ndo_start_xmit,
 * and the minimum mac header can be evaluated inside the driver
 * layer. The rest of the mac header will be part of the payload if
 * it is greater than hard_header_len.
 */
dev->mtu = IEEE802154_MTU - IEEE802154_FCS_LEN -
dev->hard_header_len;
dev->tx_queue_len = 300;
dev->flags = IFF_NOARP | IFF_BROADCAST;
}
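/* Initialize the type-dependent parts of a sub-interface: random BSN
 * and DSN, the 802.15.4-2011 MAC defaults and the type-specific
 * netdev/header ops; node and coord interfaces additionally get their
 * llsec state initialized.
 */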
static int
ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
enum nl802154_iftype type)
{
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
int ret;
u8 tmp;
/* set some type-dependent values */
sdata->wpan_dev.iftype = type;
get_random_bytes(&tmp, sizeof(tmp));
atomic_set(&wpan_dev->bsn, tmp);
get_random_bytes(&tmp, sizeof(tmp));
atomic_set(&wpan_dev->dsn, tmp);
/* defaults per 802.15.4-2011 */
wpan_dev->min_be = 3;
wpan_dev->max_be = 5;
wpan_dev->csma_retries = 4;
wpan_dev->frame_retries = 3;
wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
switch (type) {
case NL802154_IFTYPE_COORD:
case NL802154_IFTYPE_NODE:
ieee802154_be64_to_le64(&wpan_dev->extended_addr,
sdata->dev->dev_addr);
sdata->dev->header_ops = &mac802154_header_ops;
sdata->dev->needs_free_netdev = true;
sdata->dev->priv_destructor = mac802154_wpan_free;
sdata->dev->netdev_ops = &mac802154_wpan_ops;
sdata->dev->ml_priv = &mac802154_mlme_wpan;
sdata->iface_default_filtering = IEEE802154_FILTERING_4_FRAME_FIELDS;
wpan_dev->header_ops = &ieee802154_header_ops;
mutex_init(&sdata->sec_mtx);
mac802154_llsec_init(&sdata->sec);
ret = mac802154_wpan_update_llsec(sdata->dev);
if (ret < 0)
return ret;
break;
case NL802154_IFTYPE_MONITOR:
sdata->dev->needs_free_netdev = true;
sdata->dev->netdev_ops = &mac802154_monitor_ops;
sdata->iface_default_filtering = IEEE802154_FILTERING_NONE;
break;
default:
BUG();
}
return 0;
}
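/* Allocate and register a new interface of the given type on this
 * phy. Must be called with the RTNL held; returns the net_device on
 * success or an ERR_PTR() on failure.
 */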
struct net_device *
ieee802154_if_add(struct ieee802154_local *local, const char *name,
unsigned char name_assign_type, enum nl802154_iftype type,
__le64 extended_addr)
{
u8 addr[IEEE802154_EXTENDED_ADDR_LEN];
struct net_device *ndev = NULL;
struct ieee802154_sub_if_data *sdata = NULL;
int ret;
ASSERT_RTNL();
ndev = alloc_netdev(sizeof(*sdata), name,
name_assign_type, ieee802154_if_setup);
if (!ndev)
return ERR_PTR(-ENOMEM);
ndev->needed_headroom = local->hw.extra_tx_headroom +
IEEE802154_MAX_HEADER_LEN;
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0)
goto err;
ieee802154_le64_to_be64(ndev->perm_addr,
&local->hw.phy->perm_extended_addr);
switch (type) {
case NL802154_IFTYPE_COORD:
case NL802154_IFTYPE_NODE:
ndev->type = ARPHRD_IEEE802154;
if (ieee802154_is_valid_extended_unicast_addr(extended_addr)) {
ieee802154_le64_to_be64(addr, &extended_addr);
dev_addr_set(ndev, addr);
} else {
dev_addr_set(ndev, ndev->perm_addr);
}
break;
case NL802154_IFTYPE_MONITOR:
ndev->type = ARPHRD_IEEE802154_MONITOR;
break;
default:
ret = -EINVAL;
goto err;
}
/* TODO check this */
SET_NETDEV_DEV(ndev, &local->phy->dev);
dev_net_set(ndev, wpan_phy_net(local->hw.phy));
sdata = netdev_priv(ndev);
ndev->ieee802154_ptr = &sdata->wpan_dev;
memcpy(sdata->name, ndev->name, IFNAMSIZ);
sdata->dev = ndev;
sdata->wpan_dev.wpan_phy = local->hw.phy;
sdata->local = local;
INIT_LIST_HEAD(&sdata->wpan_dev.list);
/* setup type-dependent data */
ret = ieee802154_setup_sdata(sdata, type);
if (ret)
goto err;
ret = register_netdevice(ndev);
if (ret < 0)
goto err;
mutex_lock(&local->iflist_mtx);
list_add_tail_rcu(&sdata->list, &local->interfaces);
mutex_unlock(&local->iflist_mtx);
return ndev;
err:
free_netdev(ndev);
return ERR_PTR(ret);
}
void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata)
{
ASSERT_RTNL();
mutex_lock(&sdata->local->iflist_mtx);
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
synchronize_rcu();
unregister_netdevice(sdata->dev);
}
void ieee802154_remove_interfaces(struct ieee802154_local *local)
{
struct ieee802154_sub_if_data *sdata, *tmp;
mutex_lock(&local->iflist_mtx);
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
unregister_netdevice(sdata->dev);
}
mutex_unlock(&local->iflist_mtx);
}
static int netdev_notify(struct notifier_block *nb,
unsigned long state, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct ieee802154_sub_if_data *sdata;
if (state != NETDEV_CHANGENAME)
return NOTIFY_DONE;
if (!dev->ieee802154_ptr || !dev->ieee802154_ptr->wpan_phy)
return NOTIFY_DONE;
if (dev->ieee802154_ptr->wpan_phy->privid != mac802154_wpan_phy_privid)
return NOTIFY_DONE;
sdata = IEEE802154_DEV_TO_SUB_IF(dev);
memcpy(sdata->name, dev->name, IFNAMSIZ);
return NOTIFY_OK;
}
static struct notifier_block mac802154_netdev_notifier = {
.notifier_call = netdev_notify,
};
int ieee802154_iface_init(void)
{
return register_netdevice_notifier(&mac802154_netdev_notifier);
}
void ieee802154_iface_exit(void)
{
unregister_netdevice_notifier(&mac802154_netdev_notifier);
}
| linux-master | net/mac802154/iface.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2012 Siemens AG
*
* Written by:
* Pavel Smolenskiy <[email protected]>
* Maxim Gorbachyov <[email protected]>
* Dmitry Eremin-Solenikov <[email protected]>
* Alexander Smirnov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
#include <net/nl802154.h>
#include "ieee802154_i.h"
static int ieee802154_deliver_skb(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = htons(ETH_P_IEEE802154);
return netif_receive_skb(skb);
}
void mac802154_rx_beacon_worker(struct work_struct *work)
{
struct ieee802154_local *local =
container_of(work, struct ieee802154_local, rx_beacon_work);
struct cfg802154_mac_pkt *mac_pkt;
mac_pkt = list_first_entry_or_null(&local->rx_beacon_list,
struct cfg802154_mac_pkt, node);
if (!mac_pkt)
return;
mac802154_process_beacon(local, mac_pkt->skb, mac_pkt->page, mac_pkt->channel);
list_del(&mac_pkt->node);
kfree_skb(mac_pkt->skb);
kfree(mac_pkt);
}
static bool mac802154_should_answer_beacon_req(struct ieee802154_local *local)
{
struct cfg802154_beacon_request *beacon_req;
unsigned int interval;
rcu_read_lock();
beacon_req = rcu_dereference(local->beacon_req);
if (!beacon_req) {
rcu_read_unlock();
return false;
}
interval = beacon_req->interval;
rcu_read_unlock();
if (!mac802154_is_beaconing(local))
return false;
return interval == IEEE802154_ACTIVE_SCAN_DURATION;
}
void mac802154_rx_mac_cmd_worker(struct work_struct *work)
{
struct ieee802154_local *local =
container_of(work, struct ieee802154_local, rx_mac_cmd_work);
struct cfg802154_mac_pkt *mac_pkt;
u8 mac_cmd;
int rc;
mac_pkt = list_first_entry_or_null(&local->rx_mac_cmd_list,
struct cfg802154_mac_pkt, node);
if (!mac_pkt)
return;
rc = ieee802154_get_mac_cmd(mac_pkt->skb, &mac_cmd);
if (rc)
goto out;
switch (mac_cmd) {
case IEEE802154_CMD_BEACON_REQ:
dev_dbg(&mac_pkt->sdata->dev->dev, "processing BEACON REQ\n");
if (!mac802154_should_answer_beacon_req(local))
break;
queue_delayed_work(local->mac_wq, &local->beacon_work, 0);
break;
default:
break;
}
out:
list_del(&mac_pkt->node);
kfree_skb(mac_pkt->skb);
kfree(mac_pkt);
}
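/* Per-interface RX path: apply scan filtering, classify the frame by
 * destination address (setting skb->pkt_type), run llsec decryption
 * and then dispatch by frame type: beacons (while scanning) and MAC
 * commands are queued to their workers, data frames are delivered to
 * the stack, everything else is dropped.
 */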
static int
ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb, const struct ieee802154_hdr *hdr)
{
struct wpan_phy *wpan_phy = sdata->local->hw.phy;
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct cfg802154_mac_pkt *mac_pkt;
__le16 span, sshort;
int rc;
pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
span = wpan_dev->pan_id;
sshort = wpan_dev->short_addr;
/* Level 3 filtering: Only beacons are accepted during scans */
if (sdata->required_filtering == IEEE802154_FILTERING_3_SCAN &&
sdata->required_filtering > wpan_phy->filtering) {
if (mac_cb(skb)->type != IEEE802154_FC_TYPE_BEACON) {
dev_dbg(&sdata->dev->dev,
"drop non-beacon frame (0x%x) during scan\n",
mac_cb(skb)->type);
goto fail;
}
}
switch (mac_cb(skb)->dest.mode) {
case IEEE802154_ADDR_NONE:
if (hdr->source.mode != IEEE802154_ADDR_NONE)
/* FIXME: check if we are PAN coordinator */
skb->pkt_type = PACKET_OTHERHOST;
else
/* ACK comes with both addresses empty */
skb->pkt_type = PACKET_HOST;
break;
case IEEE802154_ADDR_LONG:
if (mac_cb(skb)->dest.pan_id != span &&
mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
skb->pkt_type = PACKET_OTHERHOST;
else if (mac_cb(skb)->dest.extended_addr == wpan_dev->extended_addr)
skb->pkt_type = PACKET_HOST;
else
skb->pkt_type = PACKET_OTHERHOST;
break;
case IEEE802154_ADDR_SHORT:
if (mac_cb(skb)->dest.pan_id != span &&
mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
skb->pkt_type = PACKET_OTHERHOST;
else if (mac_cb(skb)->dest.short_addr == sshort)
skb->pkt_type = PACKET_HOST;
else if (mac_cb(skb)->dest.short_addr ==
cpu_to_le16(IEEE802154_ADDR_BROADCAST))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_OTHERHOST;
break;
default:
pr_debug("invalid dest mode\n");
goto fail;
}
skb->dev = sdata->dev;
/* TODO: this should be moved after the netif_receive_skb call,
 * otherwise wireshark will show a mac header with security fields
 * even though the payload is already decrypted.
 */
rc = mac802154_llsec_decrypt(&sdata->sec, skb);
if (rc) {
pr_debug("decryption failed: %i\n", rc);
goto fail;
}
sdata->dev->stats.rx_packets++;
sdata->dev->stats.rx_bytes += skb->len;
switch (mac_cb(skb)->type) {
case IEEE802154_FC_TYPE_BEACON:
dev_dbg(&sdata->dev->dev, "BEACON received\n");
if (!mac802154_is_scanning(sdata->local))
goto fail;
mac_pkt = kzalloc(sizeof(*mac_pkt), GFP_ATOMIC);
if (!mac_pkt)
goto fail;
mac_pkt->skb = skb_get(skb);
mac_pkt->sdata = sdata;
mac_pkt->page = sdata->local->scan_page;
mac_pkt->channel = sdata->local->scan_channel;
list_add_tail(&mac_pkt->node, &sdata->local->rx_beacon_list);
queue_work(sdata->local->mac_wq, &sdata->local->rx_beacon_work);
return NET_RX_SUCCESS;
case IEEE802154_FC_TYPE_MAC_CMD:
dev_dbg(&sdata->dev->dev, "MAC COMMAND received\n");
mac_pkt = kzalloc(sizeof(*mac_pkt), GFP_ATOMIC);
if (!mac_pkt)
goto fail;
mac_pkt->skb = skb_get(skb);
mac_pkt->sdata = sdata;
list_add_tail(&mac_pkt->node, &sdata->local->rx_mac_cmd_list);
queue_work(sdata->local->mac_wq, &sdata->local->rx_mac_cmd_work);
return NET_RX_SUCCESS;
case IEEE802154_FC_TYPE_ACK:
goto fail;
case IEEE802154_FC_TYPE_DATA:
return ieee802154_deliver_skb(skb);
default:
pr_warn_ratelimited("ieee802154: bad frame received "
"(type = %d)\n", mac_cb(skb)->type);
goto fail;
}
fail:
kfree_skb(skb);
return NET_RX_DROP;
}
static void
ieee802154_print_addr(const char *name, const struct ieee802154_addr *addr)
{
if (addr->mode == IEEE802154_ADDR_NONE) {
pr_debug("%s not present\n", name);
return;
}
pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id));
if (addr->mode == IEEE802154_ADDR_SHORT) {
pr_debug("%s is short: %04x\n", name,
le16_to_cpu(addr->short_addr));
} else {
u64 hw = swab64((__force u64)addr->extended_addr);
pr_debug("%s is hardware: %8phC\n", name, &hw);
}
}
static int
ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
int hlen;
struct ieee802154_mac_cb *cb = mac_cb(skb);
skb_reset_mac_header(skb);
hlen = ieee802154_hdr_pull(skb, hdr);
if (hlen < 0)
return -EINVAL;
skb->mac_len = hlen;
pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc),
hdr->seq);
cb->type = hdr->fc.type;
cb->ackreq = hdr->fc.ack_request;
cb->secen = hdr->fc.security_enabled;
ieee802154_print_addr("destination", &hdr->dest);
ieee802154_print_addr("source", &hdr->source);
cb->source = hdr->source;
cb->dest = hdr->dest;
if (hdr->fc.security_enabled) {
u64 key;
pr_debug("seclevel %i\n", hdr->sec.level);
switch (hdr->sec.key_id_mode) {
case IEEE802154_SCF_KEY_IMPLICIT:
pr_debug("implicit key\n");
break;
case IEEE802154_SCF_KEY_INDEX:
pr_debug("key %02x\n", hdr->sec.key_id);
break;
case IEEE802154_SCF_KEY_SHORT_INDEX:
pr_debug("key %04x:%04x %02x\n",
le32_to_cpu(hdr->sec.short_src) >> 16,
le32_to_cpu(hdr->sec.short_src) & 0xffff,
hdr->sec.key_id);
break;
case IEEE802154_SCF_KEY_HW_INDEX:
key = swab64((__force u64)hdr->sec.extended_src);
pr_debug("key source %8phC %02x\n", &key,
hdr->sec.key_id);
break;
}
}
return 0;
}
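/* Parse the MAC header once and hand a clone of the frame to every
 * running non-monitor interface that passes the filtering checks.
 */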
static void
__ieee802154_rx_handle_packet(struct ieee802154_local *local,
struct sk_buff *skb)
{
int ret;
struct ieee802154_sub_if_data *sdata;
struct ieee802154_hdr hdr;
struct sk_buff *skb2;
ret = ieee802154_parse_frame_start(skb, &hdr);
if (ret) {
pr_debug("got invalid frame\n");
return;
}
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (sdata->wpan_dev.iftype == NL802154_IFTYPE_MONITOR)
continue;
if (!ieee802154_sdata_running(sdata))
continue;
/* Do not deliver packets received on interfaces expecting
 * AACK=1 if the address filters were disabled.
 */
if (local->hw.phy->filtering < IEEE802154_FILTERING_4_FRAME_FIELDS &&
sdata->required_filtering == IEEE802154_FILTERING_4_FRAME_FIELDS)
continue;
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
skb2->dev = sdata->dev;
ieee802154_subif_frame(sdata, skb2, &hdr);
}
}
}
static void
ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb)
{
struct sk_buff *skb2;
struct ieee802154_sub_if_data *sdata;
skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_IEEE802154);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (sdata->wpan_dev.iftype != NL802154_IFTYPE_MONITOR)
continue;
if (!ieee802154_sdata_running(sdata))
continue;
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
skb2->dev = sdata->dev;
ieee802154_deliver_skb(skb2);
sdata->dev->stats.rx_packets++;
sdata->dev->stats.rx_bytes += skb->len;
}
}
}
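/* Main receive path; must run in softirq context. Delivers the frame
 * to monitor interfaces, verifies the FCS in software when the
 * hardware does not filter on it, strips it and then runs the regular
 * RX handling.
 */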
void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
{
u16 crc;
WARN_ON_ONCE(softirq_count() == 0);
if (local->suspended)
goto free_skb;
/* TODO: When a transceiver omits the checksum here, we
 * add our own calculated one. This is currently an ugly
 * solution because the monitor needs a CRC here.
 */
if (local->hw.flags & IEEE802154_HW_RX_OMIT_CKSUM) {
crc = crc_ccitt(0, skb->data, skb->len);
put_unaligned_le16(crc, skb_put(skb, 2));
}
rcu_read_lock();
ieee802154_monitors_rx(local, skb);
/* Level 1 filtering: Check the FCS in software when relevant */
if (local->hw.phy->filtering == IEEE802154_FILTERING_NONE) {
crc = crc_ccitt(0, skb->data, skb->len);
if (crc)
goto drop;
}
/* remove crc */
skb_trim(skb, skb->len - 2);
__ieee802154_rx_handle_packet(local, skb);
drop:
rcu_read_unlock();
free_skb:
kfree_skb(skb);
}
void
ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
{
struct ieee802154_local *local = hw_to_local(hw);
struct ieee802154_mac_cb *cb = mac_cb_init(skb);
cb->lqi = lqi;
skb->pkt_type = IEEE802154_RX_MSG;
skb_queue_tail(&local->skb_queue, skb);
tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee802154_rx_irqsafe);
| linux-master | net/mac802154/rx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors:
* Alexander Aring <[email protected]>
*
* Based on: net/mac80211/util.c
*/
#include "ieee802154_i.h"
#include "driver-ops.h"
/* privid for wpan_phys to determine whether they belong to us or not */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
/**
* ieee802154_wake_queue - wake ieee802154 queue
* @hw: main hardware object
*
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we had to stop the queue to
 * avoid new skbs arriving during the transmission. The queue then needs to be
 * woken up after the operation.
*/
static void ieee802154_wake_queue(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
struct ieee802154_sub_if_data *sdata;
rcu_read_lock();
clear_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (!sdata->dev)
continue;
netif_wake_queue(sdata->dev);
}
rcu_read_unlock();
}
/**
* ieee802154_stop_queue - stop ieee802154 queue
* @hw: main hardware object
*
 * Transceivers usually have either one transmit framebuffer or one framebuffer
* for both transmitting and receiving. Hence, the core currently only handles
* one frame at a time for each phy, which means we need to tell upper layers to
* stop giving us new skbs while we are busy with the transmitted one. The queue
* must then be stopped before transmitting.
*/
static void ieee802154_stop_queue(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
struct ieee802154_sub_if_data *sdata;
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (!sdata->dev)
continue;
netif_stop_queue(sdata->dev);
}
rcu_read_unlock();
}
void ieee802154_hold_queue(struct ieee802154_local *local)
{
unsigned long flags;
spin_lock_irqsave(&local->phy->queue_lock, flags);
if (!atomic_fetch_inc(&local->phy->hold_txs))
ieee802154_stop_queue(&local->hw);
spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
void ieee802154_release_queue(struct ieee802154_local *local)
{
unsigned long flags;
spin_lock_irqsave(&local->phy->queue_lock, flags);
if (atomic_dec_and_test(&local->phy->hold_txs))
ieee802154_wake_queue(&local->hw);
spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
void ieee802154_disable_queue(struct ieee802154_local *local)
{
struct ieee802154_sub_if_data *sdata;
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (!sdata->dev)
continue;
netif_tx_disable(sdata->dev);
}
rcu_read_unlock();
}
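/* hrtimer callback: the inter-frame spacing period has elapsed, so
 * the transmit queue can be released again.
 */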
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
{
struct ieee802154_local *local =
container_of(timer, struct ieee802154_local, ifs_timer);
ieee802154_release_queue(local);
return HRTIMER_NORESTART;
}
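/* Driver callback for a successful transmission: arm the SIFS/LIFS
 * inter-frame spacing timer (or release the queue immediately), free
 * the skb and wake up any synchronous transmit waiters once no
 * transmissions are ongoing.
 */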
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
bool ifs_handling)
{
struct ieee802154_local *local = hw_to_local(hw);
local->tx_result = IEEE802154_SUCCESS;
if (ifs_handling) {
u8 max_sifs_size;
/* If the transceiver sets the CRC on its own we need to use a lifs
 * threshold length above 16, otherwise 18, because the CRC is not
 * part of skb->len.
 */
if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
IEEE802154_FCS_LEN;
else
max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;
if (skb->len > max_sifs_size)
hrtimer_start(&local->ifs_timer,
hw->phy->lifs_period * NSEC_PER_USEC,
HRTIMER_MODE_REL);
else
hrtimer_start(&local->ifs_timer,
hw->phy->sifs_period * NSEC_PER_USEC,
HRTIMER_MODE_REL);
} else {
ieee802154_release_queue(local);
}
dev_consume_skb_any(skb);
if (atomic_dec_and_test(&hw->phy->ongoing_txs))
wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
int reason)
{
struct ieee802154_local *local = hw_to_local(hw);
local->tx_result = reason;
ieee802154_release_queue(local);
dev_kfree_skb_any(skb);
if (atomic_dec_and_test(&hw->phy->ongoing_txs))
wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_error);
void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb)
{
ieee802154_xmit_error(hw, skb, IEEE802154_SYSTEM_ERROR);
}
EXPORT_SYMBOL(ieee802154_xmit_hw_error);
void ieee802154_stop_device(struct ieee802154_local *local)
{
flush_workqueue(local->workqueue);
hrtimer_cancel(&local->ifs_timer);
drv_stop(local);
}
| linux-master | net/mac802154/util.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors:
* Alexander Aring <[email protected]>
*
* Based on: net/mac80211/cfg.c
*/
#include <net/rtnetlink.h>
#include <net/cfg802154.h>
#include "ieee802154_i.h"
#include "driver-ops.h"
#include "cfg.h"
static struct net_device *
ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy,
const char *name,
unsigned char name_assign_type, int type)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
struct net_device *dev;
rtnl_lock();
dev = ieee802154_if_add(local, name, name_assign_type, type,
cpu_to_le64(0x0000000000000000ULL));
rtnl_unlock();
return dev;
}
static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
ieee802154_if_remove(sdata);
}
#ifdef CONFIG_PM
static int ieee802154_suspend(struct wpan_phy *wpan_phy)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
if (!local->open_count)
goto suspend;
ieee802154_sync_and_hold_queue(local);
synchronize_net();
/* stop hardware - this must stop RX */
ieee802154_stop_device(local);
suspend:
local->suspended = true;
return 0;
}
static int ieee802154_resume(struct wpan_phy *wpan_phy)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
int ret;
/* nothing to do if HW shouldn't run */
if (!local->open_count)
goto wake_up;
/* restart hardware */
ret = drv_start(local, local->phy->filtering, &local->addr_filt);
if (ret)
return ret;
wake_up:
ieee802154_release_queue(local);
local->suspended = false;
return 0;
}
#else
#define ieee802154_suspend NULL
#define ieee802154_resume NULL
#endif
static int
ieee802154_add_iface(struct wpan_phy *phy, const char *name,
unsigned char name_assign_type,
enum nl802154_iftype type, __le64 extended_addr)
{
struct ieee802154_local *local = wpan_phy_priv(phy);
struct net_device *err;
err = ieee802154_if_add(local, name, name_assign_type, type,
extended_addr);
return PTR_ERR_OR_ZERO(err);
}
static int
ieee802154_del_iface(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev)
{
ieee802154_if_remove(IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev));
return 0;
}
static int
ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
int ret;
ASSERT_RTNL();
if (wpan_phy->current_page == page &&
wpan_phy->current_channel == channel)
return 0;
/* Refuse to change channels during scanning or beaconing */
if (mac802154_is_scanning(local) || mac802154_is_beaconing(local))
return -EBUSY;
ret = drv_set_channel(local, page, channel);
if (!ret) {
wpan_phy->current_page = page;
wpan_phy->current_channel = channel;
ieee802154_configure_durations(wpan_phy, page, channel);
}
return ret;
}
static int
ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
const struct wpan_phy_cca *cca)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
int ret;
ASSERT_RTNL();
if (wpan_phy_cca_cmp(&wpan_phy->cca, cca))
return 0;
ret = drv_set_cca_mode(local, cca);
if (!ret)
wpan_phy->cca = *cca;
return ret;
}
static int
ieee802154_set_cca_ed_level(struct wpan_phy *wpan_phy, s32 ed_level)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
int ret;
ASSERT_RTNL();
if (wpan_phy->cca_ed_level == ed_level)
return 0;
ret = drv_set_cca_ed_level(local, ed_level);
if (!ret)
wpan_phy->cca_ed_level = ed_level;
return ret;
}
static int
ieee802154_set_tx_power(struct wpan_phy *wpan_phy, s32 power)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
int ret;
ASSERT_RTNL();
if (wpan_phy->transmit_power == power)
return 0;
ret = drv_set_tx_power(local, power);
if (!ret)
wpan_phy->transmit_power = power;
return ret;
}
static int
ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 pan_id)
{
int ret;
ASSERT_RTNL();
if (wpan_dev->pan_id == pan_id)
return 0;
ret = mac802154_wpan_update_llsec(wpan_dev->netdev);
if (!ret)
wpan_dev->pan_id = pan_id;
return ret;
}
static int
ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
u8 min_be, u8 max_be)
{
ASSERT_RTNL();
wpan_dev->min_be = min_be;
wpan_dev->max_be = max_be;
return 0;
}
static int
ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 short_addr)
{
ASSERT_RTNL();
wpan_dev->short_addr = short_addr;
return 0;
}
static int
ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
u8 max_csma_backoffs)
{
ASSERT_RTNL();
wpan_dev->csma_retries = max_csma_backoffs;
return 0;
}
static int
ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
s8 max_frame_retries)
{
ASSERT_RTNL();
wpan_dev->frame_retries = max_frame_retries;
return 0;
}
static int
ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
bool mode)
{
ASSERT_RTNL();
wpan_dev->lbt = mode;
return 0;
}
static int
ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, bool ackreq)
{
ASSERT_RTNL();
wpan_dev->ackreq = ackreq;
return 0;
}
static int mac802154_trigger_scan(struct wpan_phy *wpan_phy,
struct cfg802154_scan_request *request)
{
struct ieee802154_sub_if_data *sdata;
sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(request->wpan_dev);
ASSERT_RTNL();
return mac802154_trigger_scan_locked(sdata, request);
}
static int mac802154_abort_scan(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
struct ieee802154_sub_if_data *sdata;
sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev);
ASSERT_RTNL();
return mac802154_abort_scan_locked(local, sdata);
}
static int mac802154_send_beacons(struct wpan_phy *wpan_phy,
struct cfg802154_beacon_request *request)
{
struct ieee802154_sub_if_data *sdata;
sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(request->wpan_dev);
ASSERT_RTNL();
return mac802154_send_beacons_locked(sdata, request);
}
static int mac802154_stop_beacons(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
struct ieee802154_sub_if_data *sdata;
sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev);
ASSERT_RTNL();
return mac802154_stop_beacons_locked(local, sdata);
}
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
static void
ieee802154_get_llsec_table(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
struct ieee802154_llsec_table **table)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
*table = &sdata->sec.table;
}
static void
ieee802154_lock_llsec_table(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
mutex_lock(&sdata->sec_mtx);
}
static void
ieee802154_unlock_llsec_table(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
mutex_unlock(&sdata->sec_mtx);
}
static int
ieee802154_set_llsec_params(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_params *params,
int changed)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_set_params(&sdata->sec, params, changed);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_get_llsec_params(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
struct ieee802154_llsec_params *params)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_get_params(&sdata->sec, params);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_add_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_key_id *id,
const struct ieee802154_llsec_key *key)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_key_add(&sdata->sec, id, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_del_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_key_id *id)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_key_del(&sdata->sec, id);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_add_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_seclevel *sl)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_seclevel_add(&sdata->sec, sl);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_del_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_seclevel *sl)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_seclevel_del(&sdata->sec, sl);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_add_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
const struct ieee802154_llsec_device *dev_desc)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_dev_add(&sdata->sec, dev_desc);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_del_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le64 extended_addr)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_dev_del(&sdata->sec, extended_addr);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_add_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le64 extended_addr,
const struct ieee802154_llsec_device_key *key)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_devkey_add(&sdata->sec, extended_addr, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
static int
ieee802154_del_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le64 extended_addr,
const struct ieee802154_llsec_device_key *key)
{
struct net_device *dev = wpan_dev->netdev;
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
mutex_lock(&sdata->sec_mtx);
res = mac802154_llsec_devkey_del(&sdata->sec, extended_addr, key);
mutex_unlock(&sdata->sec_mtx);
return res;
}
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
const struct cfg802154_ops mac802154_config_ops = {
.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
.suspend = ieee802154_suspend,
.resume = ieee802154_resume,
.add_virtual_intf = ieee802154_add_iface,
.del_virtual_intf = ieee802154_del_iface,
.set_channel = ieee802154_set_channel,
.set_cca_mode = ieee802154_set_cca_mode,
.set_cca_ed_level = ieee802154_set_cca_ed_level,
.set_tx_power = ieee802154_set_tx_power,
.set_pan_id = ieee802154_set_pan_id,
.set_short_addr = ieee802154_set_short_addr,
.set_backoff_exponent = ieee802154_set_backoff_exponent,
.set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
.set_max_frame_retries = ieee802154_set_max_frame_retries,
.set_lbt_mode = ieee802154_set_lbt_mode,
.set_ackreq_default = ieee802154_set_ackreq_default,
.trigger_scan = mac802154_trigger_scan,
.abort_scan = mac802154_abort_scan,
.send_beacons = mac802154_send_beacons,
.stop_beacons = mac802154_stop_beacons,
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
.get_llsec_table = ieee802154_get_llsec_table,
.lock_llsec_table = ieee802154_lock_llsec_table,
.unlock_llsec_table = ieee802154_unlock_llsec_table,
/* TODO above */
.set_llsec_params = ieee802154_set_llsec_params,
.get_llsec_params = ieee802154_get_llsec_params,
.add_llsec_key = ieee802154_add_llsec_key,
.del_llsec_key = ieee802154_del_llsec_key,
.add_seclevel = ieee802154_add_seclevel,
.del_seclevel = ieee802154_del_seclevel,
.add_device = ieee802154_add_device,
.del_device = ieee802154_del_device,
.add_devkey = ieee802154_add_devkey,
.del_devkey = ieee802154_del_devkey,
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
};
| linux-master | net/mac802154/cfg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Fraunhofer ITWM
*
* Written by:
* Phoebe Buckheister <[email protected]>
*/
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/ieee802154.h>
#include <linux/rculist.h>
#include <crypto/aead.h>
#include <crypto/skcipher.h>
#include "ieee802154_i.h"
#include "llsec.h"
static void llsec_key_put(struct mac802154_llsec_key *key);
static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
const struct ieee802154_llsec_key_id *b);
static void llsec_dev_free(struct mac802154_llsec_device *dev);
void mac802154_llsec_init(struct mac802154_llsec *sec)
{
memset(sec, 0, sizeof(*sec));
memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);
INIT_LIST_HEAD(&sec->table.security_levels);
INIT_LIST_HEAD(&sec->table.devices);
INIT_LIST_HEAD(&sec->table.keys);
hash_init(sec->devices_short);
hash_init(sec->devices_hw);
rwlock_init(&sec->lock);
}
void mac802154_llsec_destroy(struct mac802154_llsec *sec)
{
struct ieee802154_llsec_seclevel *sl, *sn;
struct ieee802154_llsec_device *dev, *dn;
struct ieee802154_llsec_key_entry *key, *kn;
list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
struct mac802154_llsec_seclevel *msl;
msl = container_of(sl, struct mac802154_llsec_seclevel, level);
list_del(&sl->list);
kfree_sensitive(msl);
}
list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
struct mac802154_llsec_device *mdev;
mdev = container_of(dev, struct mac802154_llsec_device, dev);
list_del(&dev->list);
llsec_dev_free(mdev);
}
list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
struct mac802154_llsec_key *mkey;
mkey = container_of(key->key, struct mac802154_llsec_key, key);
list_del(&key->list);
llsec_key_put(mkey);
kfree_sensitive(key);
}
}
int mac802154_llsec_get_params(struct mac802154_llsec *sec,
struct ieee802154_llsec_params *params)
{
read_lock_bh(&sec->lock);
*params = sec->params;
read_unlock_bh(&sec->lock);
return 0;
}
int mac802154_llsec_set_params(struct mac802154_llsec *sec,
const struct ieee802154_llsec_params *params,
int changed)
{
write_lock_bh(&sec->lock);
if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
sec->params.enabled = params->enabled;
if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
sec->params.frame_counter = params->frame_counter;
if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
sec->params.out_level = params->out_level;
if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
sec->params.out_key = params->out_key;
if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
sec->params.default_key_source = params->default_key_source;
if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
sec->params.pan_id = params->pan_id;
if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
sec->params.hwaddr = params->hwaddr;
if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
sec->params.coord_hwaddr = params->coord_hwaddr;
if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
sec->params.coord_shortaddr = params->coord_shortaddr;
write_unlock_bh(&sec->lock);
return 0;
}
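/* Allocate a refcounted key from the given template: one ccm(aes)
 * AEAD transform per supported authentication tag size (4, 8 and 16
 * bytes) plus a ctr(aes) sync skcipher, all keyed with the template
 * key material.
 */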
static struct mac802154_llsec_key*
llsec_key_alloc(const struct ieee802154_llsec_key *template)
{
const int authsizes[3] = { 4, 8, 16 };
struct mac802154_llsec_key *key;
int i;
key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key)
return NULL;
kref_init(&key->ref);
key->key = *template;
BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));
for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(key->tfm[i]))
goto err_tfm;
if (crypto_aead_setkey(key->tfm[i], template->key,
IEEE802154_LLSEC_KEY_SIZE))
goto err_tfm;
if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
goto err_tfm;
}
key->tfm0 = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
if (IS_ERR(key->tfm0))
goto err_tfm;
if (crypto_sync_skcipher_setkey(key->tfm0, template->key,
IEEE802154_LLSEC_KEY_SIZE))
goto err_tfm0;
return key;
err_tfm0:
crypto_free_sync_skcipher(key->tfm0);
err_tfm:
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
if (!IS_ERR_OR_NULL(key->tfm[i]))
crypto_free_aead(key->tfm[i]);
kfree_sensitive(key);
return NULL;
}
static void llsec_key_release(struct kref *ref)
{
struct mac802154_llsec_key *key;
int i;
key = container_of(ref, struct mac802154_llsec_key, ref);
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
crypto_free_aead(key->tfm[i]);
crypto_free_sync_skcipher(key->tfm0);
kfree_sensitive(key);
}
static struct mac802154_llsec_key*
llsec_key_get(struct mac802154_llsec_key *key)
{
kref_get(&key->ref);
return key;
}
static void llsec_key_put(struct mac802154_llsec_key *key)
{
kref_put(&key->ref, llsec_key_release);
}
static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
const struct ieee802154_llsec_key_id *b)
{
if (a->mode != b->mode)
return false;
if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
return ieee802154_addr_equal(&a->device_addr, &b->device_addr);
if (a->id != b->id)
return false;
switch (a->mode) {
case IEEE802154_SCF_KEY_INDEX:
return true;
case IEEE802154_SCF_KEY_SHORT_INDEX:
return a->short_source == b->short_source;
case IEEE802154_SCF_KEY_HW_INDEX:
return a->extended_source == b->extended_source;
}
return false;
}
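/* Add a key entry for the given key id. If the same AES key material
 * is already present it is reused (and its frame type/command frame
 * id restrictions must match); adding a duplicate key id fails with
 * -EEXIST.
 */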
int mac802154_llsec_key_add(struct mac802154_llsec *sec,
const struct ieee802154_llsec_key_id *id,
const struct ieee802154_llsec_key *key)
{
struct mac802154_llsec_key *mkey = NULL;
struct ieee802154_llsec_key_entry *pos, *new;
if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
key->cmd_frame_ids)
return -EINVAL;
list_for_each_entry(pos, &sec->table.keys, list) {
if (llsec_key_id_equal(&pos->id, id))
return -EEXIST;
if (memcmp(pos->key->key, key->key,
IEEE802154_LLSEC_KEY_SIZE))
continue;
mkey = container_of(pos->key, struct mac802154_llsec_key, key);
/* Don't allow multiple instances of the same AES key to have
* different allowed frame types/command frame ids, as this is
* not possible in the 802.15.4 PIB.
*/
if (pos->key->frame_types != key->frame_types ||
pos->key->cmd_frame_ids != key->cmd_frame_ids)
return -EEXIST;
break;
}
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
if (!mkey)
mkey = llsec_key_alloc(key);
else
mkey = llsec_key_get(mkey);
if (!mkey)
goto fail;
new->id = *id;
new->key = &mkey->key;
list_add_rcu(&new->list, &sec->table.keys);
return 0;
fail:
kfree_sensitive(new);
return -ENOMEM;
}
int mac802154_llsec_key_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_key_id *key)
{
struct ieee802154_llsec_key_entry *pos;
list_for_each_entry(pos, &sec->table.keys, list) {
struct mac802154_llsec_key *mkey;
mkey = container_of(pos->key, struct mac802154_llsec_key, key);
if (llsec_key_id_equal(&pos->id, key)) {
list_del_rcu(&pos->list);
llsec_key_put(mkey);
return 0;
}
}
return -ENOENT;
}
static bool llsec_dev_use_shortaddr(__le16 short_addr)
{
return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
short_addr != cpu_to_le16(0xffff);
}
static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
{
return ((__force u16)short_addr) << 16 | (__force u16)pan_id;
}
static u64 llsec_dev_hash_long(__le64 hwaddr)
{
return (__force u64)hwaddr;
}
static struct mac802154_llsec_device*
llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
__le16 pan_id)
{
struct mac802154_llsec_device *dev;
u32 key = llsec_dev_hash_short(short_addr, pan_id);
hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
if (dev->dev.short_addr == short_addr &&
dev->dev.pan_id == pan_id)
return dev;
}
return NULL;
}
static struct mac802154_llsec_device*
llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
{
struct mac802154_llsec_device *dev;
u64 key = llsec_dev_hash_long(hwaddr);
hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
if (dev->dev.hwaddr == hwaddr)
return dev;
}
return NULL;
}
static void llsec_dev_free(struct mac802154_llsec_device *dev)
{
struct ieee802154_llsec_device_key *pos, *pn;
struct mac802154_llsec_device_key *devkey;
list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) {
devkey = container_of(pos, struct mac802154_llsec_device_key,
devkey);
list_del(&pos->list);
kfree_sensitive(devkey);
}
kfree_sensitive(dev);
}
int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
const struct ieee802154_llsec_device *dev)
{
struct mac802154_llsec_device *entry;
u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
u64 hwkey = llsec_dev_hash_long(dev->hwaddr);
BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);
if ((llsec_dev_use_shortaddr(dev->short_addr) &&
llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
llsec_dev_find_long(sec, dev->hwaddr))
return -EEXIST;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->dev = *dev;
spin_lock_init(&entry->lock);
INIT_LIST_HEAD(&entry->dev.keys);
if (llsec_dev_use_shortaddr(dev->short_addr))
hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
else
INIT_HLIST_NODE(&entry->bucket_s);
hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
list_add_tail_rcu(&entry->dev.list, &sec->table.devices);
return 0;
}
static void llsec_dev_free_rcu(struct rcu_head *rcu)
{
llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu));
}
int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
{
struct mac802154_llsec_device *pos;
pos = llsec_dev_find_long(sec, device_addr);
if (!pos)
return -ENOENT;
hash_del_rcu(&pos->bucket_s);
hash_del_rcu(&pos->bucket_hw);
list_del_rcu(&pos->dev.list);
call_rcu(&pos->rcu, llsec_dev_free_rcu);
return 0;
}
static struct mac802154_llsec_device_key*
llsec_devkey_find(struct mac802154_llsec_device *dev,
const struct ieee802154_llsec_key_id *key)
{
struct ieee802154_llsec_device_key *devkey;
list_for_each_entry_rcu(devkey, &dev->dev.keys, list) {
if (!llsec_key_id_equal(key, &devkey->key_id))
continue;
return container_of(devkey, struct mac802154_llsec_device_key,
devkey);
}
return NULL;
}
int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
__le64 dev_addr,
const struct ieee802154_llsec_device_key *key)
{
struct mac802154_llsec_device *dev;
struct mac802154_llsec_device_key *devkey;
dev = llsec_dev_find_long(sec, dev_addr);
if (!dev)
return -ENOENT;
if (llsec_devkey_find(dev, &key->key_id))
return -EEXIST;
devkey = kmalloc(sizeof(*devkey), GFP_KERNEL);
if (!devkey)
return -ENOMEM;
devkey->devkey = *key;
list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys);
return 0;
}
int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
__le64 dev_addr,
const struct ieee802154_llsec_device_key *key)
{
struct mac802154_llsec_device *dev;
struct mac802154_llsec_device_key *devkey;
dev = llsec_dev_find_long(sec, dev_addr);
if (!dev)
return -ENOENT;
devkey = llsec_devkey_find(dev, &key->key_id);
if (!devkey)
return -ENOENT;
list_del_rcu(&devkey->devkey.list);
kfree_rcu(devkey, rcu);
return 0;
}
static struct mac802154_llsec_seclevel*
llsec_find_seclevel(const struct mac802154_llsec *sec,
const struct ieee802154_llsec_seclevel *sl)
{
struct ieee802154_llsec_seclevel *pos;
list_for_each_entry(pos, &sec->table.security_levels, list) {
if (pos->frame_type != sl->frame_type ||
(pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
pos->cmd_frame_id != sl->cmd_frame_id) ||
pos->device_override != sl->device_override ||
pos->sec_levels != sl->sec_levels)
continue;
return container_of(pos, struct mac802154_llsec_seclevel,
level);
}
return NULL;
}
int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
const struct ieee802154_llsec_seclevel *sl)
{
struct mac802154_llsec_seclevel *entry;
if (llsec_find_seclevel(sec, sl))
return -EEXIST;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->level = *sl;
list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);
return 0;
}
int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_seclevel *sl)
{
struct mac802154_llsec_seclevel *pos;
pos = llsec_find_seclevel(sec, sl);
if (!pos)
return -ENOENT;
list_del_rcu(&pos->level.list);
kfree_rcu(pos, rcu);
return 0;
}
static int llsec_recover_addr(struct mac802154_llsec *sec,
struct ieee802154_addr *addr)
{
__le16 caddr = sec->params.coord_shortaddr;
addr->pan_id = sec->params.pan_id;
if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
return -EINVAL;
} else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
addr->extended_addr = sec->params.coord_hwaddr;
addr->mode = IEEE802154_ADDR_LONG;
} else {
addr->short_addr = sec->params.coord_shortaddr;
addr->mode = IEEE802154_ADDR_SHORT;
}
return 0;
}
static struct mac802154_llsec_key*
llsec_lookup_key(struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
const struct ieee802154_addr *addr,
struct ieee802154_llsec_key_id *key_id)
{
struct ieee802154_addr devaddr = *addr;
u8 key_id_mode = hdr->sec.key_id_mode;
struct ieee802154_llsec_key_entry *key_entry;
struct mac802154_llsec_key *key;
if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
devaddr.mode == IEEE802154_ADDR_NONE) {
if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
devaddr.extended_addr = sec->params.coord_hwaddr;
devaddr.mode = IEEE802154_ADDR_LONG;
} else if (llsec_recover_addr(sec, &devaddr) < 0) {
return NULL;
}
}
list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
const struct ieee802154_llsec_key_id *id = &key_entry->id;
if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
continue;
if (id->mode != key_id_mode)
continue;
if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
if (ieee802154_addr_equal(&devaddr, &id->device_addr))
goto found;
} else {
if (id->id != hdr->sec.key_id)
continue;
if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
(key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
id->short_source == hdr->sec.short_src) ||
(key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
id->extended_source == hdr->sec.extended_src))
goto found;
}
}
return NULL;
found:
key = container_of(key_entry->key, struct mac802154_llsec_key, key);
if (key_id)
*key_id = key_entry->id;
return llsec_key_get(key);
}
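/* Build the 16-byte CTR/CCM* input block: a flags byte (L' = 1), the
 * 8-byte source extended address and 4-byte frame counter byte-swapped
 * to big-endian order, the security level, and a 2-byte block counter
 * starting at 1.
 */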
static void llsec_geniv(u8 iv[16], __le64 addr,
const struct ieee802154_sechdr *sec)
{
__be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
__be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);
iv[0] = 1; /* L' = L - 1 = 1 */
memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
iv[13] = sec->level;
iv[14] = 0;
iv[15] = 1;
}
static int
llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key)
{
u8 iv[16];
struct scatterlist src;
SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
int err, datalen;
unsigned char *data;
llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
/* Compute data payload offset and data length */
data = skb_mac_header(skb) + skb->mac_len;
datalen = skb_tail_pointer(skb) - data;
sg_init_one(&src, data, datalen);
skcipher_request_set_sync_tfm(req, key->tfm0);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &src, &src, datalen, iv);
err = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
return err;
}
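/* Return the AEAD transform whose authentication tag size matches
 * authlen; the per-key transforms cover every valid tag length, so a
 * miss is a bug.
 */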
static struct crypto_aead*
llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
{
int i;
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
if (crypto_aead_authsize(key->tfm[i]) == authlen)
return key->tfm[i];
BUG();
}
static int
llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key)
{
u8 iv[16];
unsigned char *data;
int authlen, assoclen, datalen, rc;
struct scatterlist sg;
struct aead_request *req;
authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
if (!req)
return -ENOMEM;
assoclen = skb->mac_len;
data = skb_mac_header(skb) + skb->mac_len;
datalen = skb_tail_pointer(skb) - data;
skb_put(skb, authlen);
sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen + authlen);
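/* Authentication-only security levels leave the payload in the clear,
 * so pass it to the AEAD as associated data rather than as crypt data.
 */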
if (!(hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC)) {
assoclen += datalen;
datalen = 0;
}
aead_request_set_callback(req, 0, NULL, NULL);
aead_request_set_crypt(req, &sg, &sg, datalen, iv);
aead_request_set_ad(req, assoclen);
rc = crypto_aead_encrypt(req);
kfree_sensitive(req);
return rc;
}
static int llsec_do_encrypt(struct sk_buff *skb,
const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key)
{
if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
return llsec_do_encrypt_unauth(skb, sec, hdr, key);
else
return llsec_do_encrypt_auth(skb, sec, hdr, key);
}
int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
struct ieee802154_hdr hdr;
int rc, authlen, hlen;
struct mac802154_llsec_key *key;
u32 frame_ctr;
hlen = ieee802154_hdr_pull(skb, &hdr);
/* TODO: control frames security support */
if (hlen < 0 ||
(hdr.fc.type != IEEE802154_FC_TYPE_DATA &&
hdr.fc.type != IEEE802154_FC_TYPE_BEACON))
return -EINVAL;
if (!hdr.fc.security_enabled ||
(hdr.sec.level == IEEE802154_SCF_SECLEVEL_NONE)) {
skb_push(skb, hlen);
return 0;
}
authlen = ieee802154_sechdr_authtag_len(&hdr.sec);
if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
return -EMSGSIZE;
rcu_read_lock();
read_lock_bh(&sec->lock);
if (!sec->params.enabled) {
rc = -EINVAL;
goto fail_read;
}
key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
if (!key) {
rc = -ENOKEY;
goto fail_read;
}
read_unlock_bh(&sec->lock);
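/* Reserve the next outgoing frame counter under the write lock; a value
 * of 0xFFFFFFFF means the counter is exhausted and the frame is rejected.
 */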
write_lock_bh(&sec->lock);
frame_ctr = be32_to_cpu(sec->params.frame_counter);
hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
if (frame_ctr == 0xFFFFFFFF) {
write_unlock_bh(&sec->lock);
llsec_key_put(key);
rc = -EOVERFLOW;
goto fail;
}
sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);
write_unlock_bh(&sec->lock);
rcu_read_unlock();
skb->mac_len = ieee802154_hdr_push(skb, &hdr);
skb_reset_mac_header(skb);
rc = llsec_do_encrypt(skb, sec, &hdr, key);
llsec_key_put(key);
return rc;
fail_read:
read_unlock_bh(&sec->lock);
fail:
rcu_read_unlock();
return rc;
}
static struct mac802154_llsec_device*
llsec_lookup_dev(struct mac802154_llsec *sec,
const struct ieee802154_addr *addr)
{
struct ieee802154_addr devaddr = *addr;
struct mac802154_llsec_device *dev = NULL;
if (devaddr.mode == IEEE802154_ADDR_NONE &&
llsec_recover_addr(sec, &devaddr) < 0)
return NULL;
if (devaddr.mode == IEEE802154_ADDR_SHORT) {
u32 key = llsec_dev_hash_short(devaddr.short_addr,
devaddr.pan_id);
hash_for_each_possible_rcu(sec->devices_short, dev,
bucket_s, key) {
if (dev->dev.pan_id == devaddr.pan_id &&
dev->dev.short_addr == devaddr.short_addr)
return dev;
}
} else {
u64 key = llsec_dev_hash_long(devaddr.extended_addr);
hash_for_each_possible_rcu(sec->devices_hw, dev,
bucket_hw, key) {
if (dev->dev.hwaddr == devaddr.extended_addr)
return dev;
}
}
return NULL;
}
static int
llsec_lookup_seclevel(const struct mac802154_llsec *sec,
u8 frame_type, u8 cmd_frame_id,
struct ieee802154_llsec_seclevel *rlevel)
{
struct ieee802154_llsec_seclevel *level;
list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
if (level->frame_type == frame_type &&
(frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
level->cmd_frame_id == cmd_frame_id)) {
*rlevel = *level;
return 0;
}
}
return -EINVAL;
}
static int
llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key, __le64 dev_addr)
{
u8 iv[16];
unsigned char *data;
int datalen;
struct scatterlist src;
SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
int err;
llsec_geniv(iv, dev_addr, &hdr->sec);
data = skb_mac_header(skb) + skb->mac_len;
datalen = skb_tail_pointer(skb) - data;
sg_init_one(&src, data, datalen);
skcipher_request_set_sync_tfm(req, key->tfm0);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &src, &src, datalen, iv);
err = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
return err;
}
static int
llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key, __le64 dev_addr)
{
u8 iv[16];
unsigned char *data;
int authlen, datalen, assoclen, rc;
struct scatterlist sg;
struct aead_request *req;
authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
llsec_geniv(iv, dev_addr, &hdr->sec);
req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
if (!req)
return -ENOMEM;
assoclen = skb->mac_len;
data = skb_mac_header(skb) + skb->mac_len;
datalen = skb_tail_pointer(skb) - data;
sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen);
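/* Authentication-only security levels carry the payload in the clear:
 * treat it as associated data and let the AEAD verify just the tag.
 */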
if (!(hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC)) {
assoclen += datalen - authlen;
datalen = authlen;
}
aead_request_set_callback(req, 0, NULL, NULL);
aead_request_set_crypt(req, &sg, &sg, datalen, iv);
aead_request_set_ad(req, assoclen);
rc = crypto_aead_decrypt(req);
kfree_sensitive(req);
skb_trim(skb, skb->len - authlen);
return rc;
}
static int
llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key, __le64 dev_addr)
{
if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
else
return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
}
static int
llsec_update_devkey_record(struct mac802154_llsec_device *dev,
const struct ieee802154_llsec_key_id *in_key)
{
struct mac802154_llsec_device_key *devkey;
devkey = llsec_devkey_find(dev, in_key);
if (!devkey) {
struct mac802154_llsec_device_key *next;
next = kzalloc(sizeof(*devkey), GFP_ATOMIC);
if (!next)
return -ENOMEM;
next->devkey.key_id = *in_key;
spin_lock_bh(&dev->lock);
devkey = llsec_devkey_find(dev, in_key);
if (!devkey)
list_add_rcu(&next->devkey.list, &dev->dev.keys);
else
kfree_sensitive(next);
spin_unlock_bh(&dev->lock);
}
return 0;
}
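/* Apply the device's key usage mode and the anti-replay check: the
 * incoming frame counter must not fall below the stored expectation,
 * and the next expected value is recorded on success.
 */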
static int
llsec_update_devkey_info(struct mac802154_llsec_device *dev,
const struct ieee802154_llsec_key_id *in_key,
u32 frame_counter)
{
struct mac802154_llsec_device_key *devkey = NULL;
if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
devkey = llsec_devkey_find(dev, in_key);
if (!devkey)
return -ENOENT;
}
if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RECORD) {
int rc = llsec_update_devkey_record(dev, in_key);
if (rc < 0)
return rc;
}
spin_lock_bh(&dev->lock);
if ((!devkey && frame_counter < dev->dev.frame_counter) ||
(devkey && frame_counter < devkey->devkey.frame_counter)) {
spin_unlock_bh(&dev->lock);
return -EINVAL;
}
if (devkey)
devkey->devkey.frame_counter = frame_counter + 1;
else
dev->dev.frame_counter = frame_counter + 1;
spin_unlock_bh(&dev->lock);
return 0;
}
int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
struct ieee802154_hdr hdr;
struct mac802154_llsec_key *key;
struct ieee802154_llsec_key_id key_id;
struct mac802154_llsec_device *dev;
struct ieee802154_llsec_seclevel seclevel;
int err;
__le64 dev_addr;
u32 frame_ctr;
if (ieee802154_hdr_peek(skb, &hdr) < 0)
return -EINVAL;
if (!hdr.fc.security_enabled)
return 0;
if (hdr.fc.version == 0)
return -EINVAL;
read_lock_bh(&sec->lock);
if (!sec->params.enabled) {
read_unlock_bh(&sec->lock);
return -EINVAL;
}
read_unlock_bh(&sec->lock);
rcu_read_lock();
key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
if (!key) {
err = -ENOKEY;
goto fail;
}
dev = llsec_lookup_dev(sec, &hdr.source);
if (!dev) {
err = -EINVAL;
goto fail_dev;
}
if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
err = -EINVAL;
goto fail_dev;
}
if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
(hdr.sec.level == 0 && seclevel.device_override &&
!dev->dev.seclevel_exempt)) {
err = -EINVAL;
goto fail_dev;
}
frame_ctr = le32_to_cpu(hdr.sec.frame_counter);
if (frame_ctr == 0xffffffff) {
err = -EOVERFLOW;
goto fail_dev;
}
err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
if (err)
goto fail_dev;
dev_addr = dev->dev.hwaddr;
rcu_read_unlock();
err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
llsec_key_put(key);
return err;
fail_dev:
llsec_key_put(key);
fail:
rcu_read_unlock();
return err;
}
| linux-master | net/mac802154/llsec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE 802.15.4 scanning management
*
* Copyright (C) 2021 Qorvo US, Inc
* Authors:
* - David Girault <[email protected]>
* - Miquel Raynal <[email protected]>
*/
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <net/mac802154.h>
#include "ieee802154_i.h"
#include "driver-ops.h"
#include "../ieee802154/nl802154.h"
#define IEEE802154_BEACON_MHR_SZ 13
#define IEEE802154_BEACON_PL_SZ 4
#define IEEE802154_MAC_CMD_MHR_SZ 23
#define IEEE802154_MAC_CMD_PL_SZ 1
#define IEEE802154_BEACON_SKB_SZ (IEEE802154_BEACON_MHR_SZ + \
IEEE802154_BEACON_PL_SZ)
#define IEEE802154_MAC_CMD_SKB_SZ (IEEE802154_MAC_CMD_MHR_SZ + \
IEEE802154_MAC_CMD_PL_SZ)
/* mac802154_scan_cleanup_locked() must be called upon scan completion or abort.
* - Completions are asynchronous, not locked by the rtnl and decided by the
* scan worker.
* - Aborts are decided by userspace, and locked by the rtnl.
*
* Concurrent modifications to the PHY, the interfaces or the hardware are in
* general prevented by the rtnl, so in most cases we don't need additional
* protection.
*
* However, the scan worker gets triggered without anybody noticing, and thus we
* must ensure the presence of the devices as well as data consistency:
* - The sub-interface and the device driver module both get their reference
* counters incremented whenever we start a scan, so they cannot disappear
* during operation.
* - Data consistency is achieved by the use of rcu protected pointers.
*/
static int mac802154_scan_cleanup_locked(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
bool aborted)
{
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct wpan_phy *wpan_phy = local->phy;
struct cfg802154_scan_request *request;
u8 arg;
/* Prevent any further use of the scan request */
clear_bit(IEEE802154_IS_SCANNING, &local->ongoing);
cancel_delayed_work(&local->scan_work);
request = rcu_replace_pointer(local->scan_req, NULL, 1);
if (!request)
return 0;
kvfree_rcu_mightsleep(request);
/* Advertise first, while we know the devices cannot be removed */
if (aborted)
arg = NL802154_SCAN_DONE_REASON_ABORTED;
else
arg = NL802154_SCAN_DONE_REASON_FINISHED;
nl802154_scan_done(wpan_phy, wpan_dev, arg);
/* Cleanup software stack */
ieee802154_mlme_op_post(local);
/* Set the hardware back in its original state */
drv_set_channel(local, wpan_phy->current_page,
wpan_phy->current_channel);
ieee802154_configure_durations(wpan_phy, wpan_phy->current_page,
wpan_phy->current_channel);
drv_stop(local);
synchronize_net();
sdata->required_filtering = sdata->iface_default_filtering;
drv_start(local, sdata->required_filtering, &local->addr_filt);
return 0;
}
int mac802154_abort_scan_locked(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata)
{
ASSERT_RTNL();
if (!mac802154_is_scanning(local))
return -ESRCH;
return mac802154_scan_cleanup_locked(local, sdata, true);
}
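/* Time to listen on one channel: aBaseSuperframeDuration multiplied by
 * (2^duration_order + 1), converted to jiffies.
 */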
static unsigned int mac802154_scan_get_channel_time(u8 duration_order,
u8 symbol_duration)
{
u64 base_super_frame_duration = (u64)symbol_duration *
IEEE802154_SUPERFRAME_PERIOD * IEEE802154_SLOT_PERIOD;
return usecs_to_jiffies(base_super_frame_duration *
(BIT(duration_order) + 1));
}
static void mac802154_flush_queued_beacons(struct ieee802154_local *local)
{
struct cfg802154_mac_pkt *mac_pkt, *tmp;
list_for_each_entry_safe(mac_pkt, tmp, &local->rx_beacon_list, node) {
list_del(&mac_pkt->node);
kfree_skb(mac_pkt->skb);
kfree(mac_pkt);
}
}
static void
mac802154_scan_get_next_channel(struct ieee802154_local *local,
struct cfg802154_scan_request *scan_req,
u8 *channel)
{
(*channel)++;
*channel = find_next_bit((const unsigned long *)&scan_req->channels,
IEEE802154_MAX_CHANNEL + 1,
*channel);
}
static int mac802154_scan_find_next_chan(struct ieee802154_local *local,
struct cfg802154_scan_request *scan_req,
u8 page, u8 *channel)
{
mac802154_scan_get_next_channel(local, scan_req, channel);
if (*channel > IEEE802154_MAX_CHANNEL)
return -EINVAL;
return 0;
}
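/* Pre-build the broadcast beacon request command sent on each channel
 * during an active scan.
 */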
static int mac802154_scan_prepare_beacon_req(struct ieee802154_local *local)
{
memset(&local->scan_beacon_req, 0, sizeof(local->scan_beacon_req));
local->scan_beacon_req.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD;
local->scan_beacon_req.mhr.fc.dest_addr_mode = IEEE802154_SHORT_ADDRESSING;
local->scan_beacon_req.mhr.fc.version = IEEE802154_2003_STD;
local->scan_beacon_req.mhr.fc.source_addr_mode = IEEE802154_NO_ADDRESSING;
local->scan_beacon_req.mhr.dest.mode = IEEE802154_ADDR_SHORT;
local->scan_beacon_req.mhr.dest.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
local->scan_beacon_req.mhr.dest.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
local->scan_beacon_req.mac_pl.cmd_id = IEEE802154_CMD_BEACON_REQ;
return 0;
}
static int mac802154_transmit_beacon_req(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata)
{
struct sk_buff *skb;
int ret;
skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
skb->dev = sdata->dev;
ret = ieee802154_mac_cmd_push(skb, &local->scan_beacon_req, NULL, 0);
if (ret) {
kfree_skb(skb);
return ret;
}
return ieee802154_mlme_tx(local, sdata, skb);
}
void mac802154_scan_worker(struct work_struct *work)
{
struct ieee802154_local *local =
container_of(work, struct ieee802154_local, scan_work.work);
struct cfg802154_scan_request *scan_req;
struct ieee802154_sub_if_data *sdata;
unsigned int scan_duration = 0;
struct wpan_phy *wpan_phy;
u8 scan_req_duration;
u8 page, channel;
int ret;
/* Ensure the device receiver is turned off when changing channels
* because there is no atomic way to change the channel and know on
* which one a beacon might have been received.
*/
drv_stop(local);
synchronize_net();
mac802154_flush_queued_beacons(local);
rcu_read_lock();
scan_req = rcu_dereference(local->scan_req);
if (unlikely(!scan_req)) {
rcu_read_unlock();
return;
}
sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(scan_req->wpan_dev);
/* Wait an arbitrary amount of time in case we cannot use the device */
if (local->suspended || !ieee802154_sdata_running(sdata)) {
rcu_read_unlock();
queue_delayed_work(local->mac_wq, &local->scan_work,
msecs_to_jiffies(1000));
return;
}
wpan_phy = scan_req->wpan_phy;
scan_req_duration = scan_req->duration;
/* Look for the next valid chan */
page = local->scan_page;
channel = local->scan_channel;
do {
ret = mac802154_scan_find_next_chan(local, scan_req, page, &channel);
if (ret) {
rcu_read_unlock();
goto end_scan;
}
} while (!ieee802154_chan_is_valid(scan_req->wpan_phy, page, channel));
rcu_read_unlock();
/* Bypass the stack on purpose when changing the channel */
rtnl_lock();
ret = drv_set_channel(local, page, channel);
rtnl_unlock();
if (ret) {
dev_err(&sdata->dev->dev,
"Channel change failure during scan, aborting (%d)\n", ret);
goto end_scan;
}
local->scan_page = page;
local->scan_channel = channel;
rtnl_lock();
ret = drv_start(local, IEEE802154_FILTERING_3_SCAN, &local->addr_filt);
rtnl_unlock();
if (ret) {
dev_err(&sdata->dev->dev,
"Restarting failure after channel change, aborting (%d)\n", ret);
goto end_scan;
}
if (scan_req->type == NL802154_SCAN_ACTIVE) {
ret = mac802154_transmit_beacon_req(local, sdata);
if (ret)
dev_err(&sdata->dev->dev,
"Error when transmitting beacon request (%d)\n", ret);
}
ieee802154_configure_durations(wpan_phy, page, channel);
scan_duration = mac802154_scan_get_channel_time(scan_req_duration,
wpan_phy->symbol_duration);
dev_dbg(&sdata->dev->dev,
"Scan page %u channel %u for %ums\n",
page, channel, jiffies_to_msecs(scan_duration));
queue_delayed_work(local->mac_wq, &local->scan_work, scan_duration);
return;
end_scan:
rtnl_lock();
mac802154_scan_cleanup_locked(local, sdata, false);
rtnl_unlock();
}
int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata,
struct cfg802154_scan_request *request)
{
struct ieee802154_local *local = sdata->local;
ASSERT_RTNL();
if (mac802154_is_scanning(local))
return -EBUSY;
if (request->type != NL802154_SCAN_PASSIVE &&
request->type != NL802154_SCAN_ACTIVE)
return -EOPNOTSUPP;
/* Store scanning parameters */
rcu_assign_pointer(local->scan_req, request);
/* Software scanning requires to set promiscuous mode, so we need to
* pause the Tx queue during the entire operation.
*/
ieee802154_mlme_op_pre(local);
sdata->required_filtering = IEEE802154_FILTERING_3_SCAN;
local->scan_page = request->page;
local->scan_channel = -1;
set_bit(IEEE802154_IS_SCANNING, &local->ongoing);
if (request->type == NL802154_SCAN_ACTIVE)
mac802154_scan_prepare_beacon_req(local);
nl802154_scan_started(request->wpan_phy, request->wpan_dev);
queue_delayed_work(local->mac_wq, &local->scan_work, 0);
return 0;
}
int mac802154_process_beacon(struct ieee802154_local *local,
struct sk_buff *skb,
u8 page, u8 channel)
{
struct ieee802154_beacon_hdr *bh = (void *)skb->data;
struct ieee802154_addr *src = &mac_cb(skb)->source;
struct cfg802154_scan_request *scan_req;
struct ieee802154_coord_desc desc;
if (skb->len != sizeof(*bh))
return -EINVAL;
if (unlikely(src->mode == IEEE802154_ADDR_NONE))
return -EINVAL;
dev_dbg(&skb->dev->dev,
"BEACON received on page %u channel %u\n",
page, channel);
memcpy(&desc.addr, src, sizeof(desc.addr));
desc.page = page;
desc.channel = channel;
desc.link_quality = mac_cb(skb)->lqi;
desc.superframe_spec = get_unaligned_le16(skb->data);
desc.gts_permit = bh->gts_permit;
trace_802154_scan_event(&desc);
rcu_read_lock();
scan_req = rcu_dereference(local->scan_req);
if (likely(scan_req))
nl802154_scan_event(scan_req->wpan_phy, scan_req->wpan_dev, &desc);
rcu_read_unlock();
return 0;
}
static int mac802154_transmit_beacon(struct ieee802154_local *local,
struct wpan_dev *wpan_dev)
{
struct cfg802154_beacon_request *beacon_req;
struct ieee802154_sub_if_data *sdata;
struct sk_buff *skb;
int ret;
/* Update the sequence number */
local->beacon.mhr.seq = atomic_inc_return(&wpan_dev->bsn) & 0xFF;
skb = alloc_skb(IEEE802154_BEACON_SKB_SZ, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
rcu_read_lock();
beacon_req = rcu_dereference(local->beacon_req);
if (unlikely(!beacon_req)) {
rcu_read_unlock();
kfree_skb(skb);
return -EINVAL;
}
sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(beacon_req->wpan_dev);
skb->dev = sdata->dev;
rcu_read_unlock();
ret = ieee802154_beacon_push(skb, &local->beacon);
if (ret) {
kfree_skb(skb);
return ret;
}
/* Using the MLME transmission helper for sending beacons is a bit
* overkill because we do not really care about the final outcome.
*
* That said, going through the whole net stack with a regular
* dev_queue_xmit() is not relevant either because we want beacons to be
* sent "now" rather than go through the whole net stack scheduling
* (qdisc & co).
*
* Finally, using ieee802154_subif_start_xmit() would only be an option
* if we had a generic transmit helper which would acquire the
* HARD_TX_LOCK() to prevent buffer handling conflicts with regular
* packets.
*
* So for now we keep it simple and send beacons with our MLME helper,
* even if it stops the ieee802154 queue entirely during these
* transmissions, which anyway does not have a huge impact on the
* performance given the current design of the stack.
*/
return ieee802154_mlme_tx(local, sdata, skb);
}
void mac802154_beacon_worker(struct work_struct *work)
{
struct ieee802154_local *local =
container_of(work, struct ieee802154_local, beacon_work.work);
struct cfg802154_beacon_request *beacon_req;
struct ieee802154_sub_if_data *sdata;
struct wpan_dev *wpan_dev;
u8 interval;
int ret;
rcu_read_lock();
beacon_req = rcu_dereference(local->beacon_req);
if (unlikely(!beacon_req)) {
rcu_read_unlock();
return;
}
sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(beacon_req->wpan_dev);
/* Wait an arbitrary amount of time in case we cannot use the device */
if (local->suspended || !ieee802154_sdata_running(sdata)) {
rcu_read_unlock();
queue_delayed_work(local->mac_wq, &local->beacon_work,
msecs_to_jiffies(1000));
return;
}
wpan_dev = beacon_req->wpan_dev;
interval = beacon_req->interval;
rcu_read_unlock();
dev_dbg(&sdata->dev->dev, "Sending beacon\n");
ret = mac802154_transmit_beacon(local, wpan_dev);
if (ret)
dev_err(&sdata->dev->dev,
"Beacon could not be transmitted (%d)\n", ret);
if (interval < IEEE802154_ACTIVE_SCAN_DURATION)
queue_delayed_work(local->mac_wq, &local->beacon_work,
local->beacon_interval);
}
int mac802154_stop_beacons_locked(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata)
{
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct cfg802154_beacon_request *request;
ASSERT_RTNL();
if (!mac802154_is_beaconing(local))
return -ESRCH;
clear_bit(IEEE802154_IS_BEACONING, &local->ongoing);
cancel_delayed_work(&local->beacon_work);
request = rcu_replace_pointer(local->beacon_req, NULL, 1);
if (!request)
return 0;
kvfree_rcu_mightsleep(request);
nl802154_beaconing_done(wpan_dev);
return 0;
}
int mac802154_send_beacons_locked(struct ieee802154_sub_if_data *sdata,
struct cfg802154_beacon_request *request)
{
struct ieee802154_local *local = sdata->local;
ASSERT_RTNL();
if (mac802154_is_beaconing(local))
mac802154_stop_beacons_locked(local, sdata);
/* Store beaconing parameters */
rcu_assign_pointer(local->beacon_req, request);
set_bit(IEEE802154_IS_BEACONING, &local->ongoing);
memset(&local->beacon, 0, sizeof(local->beacon));
local->beacon.mhr.fc.type = IEEE802154_FC_TYPE_BEACON;
local->beacon.mhr.fc.security_enabled = 0;
local->beacon.mhr.fc.frame_pending = 0;
local->beacon.mhr.fc.ack_request = 0;
local->beacon.mhr.fc.intra_pan = 0;
local->beacon.mhr.fc.dest_addr_mode = IEEE802154_NO_ADDRESSING;
local->beacon.mhr.fc.version = IEEE802154_2003_STD;
local->beacon.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING;
atomic_set(&request->wpan_dev->bsn, -1);
local->beacon.mhr.source.mode = IEEE802154_ADDR_LONG;
local->beacon.mhr.source.pan_id = request->wpan_dev->pan_id;
local->beacon.mhr.source.extended_addr = request->wpan_dev->extended_addr;
local->beacon.mac_pl.beacon_order = request->interval;
if (request->interval <= IEEE802154_MAX_SCAN_DURATION)
local->beacon.mac_pl.superframe_order = request->interval;
local->beacon.mac_pl.final_cap_slot = 0xf;
local->beacon.mac_pl.battery_life_ext = 0;
/* TODO: Fill this field with the coordinator situation in the network */
local->beacon.mac_pl.pan_coordinator = 1;
local->beacon.mac_pl.assoc_permit = 1;
if (request->interval == IEEE802154_ACTIVE_SCAN_DURATION)
return 0;
/* Start the beacon work */
local->beacon_interval =
mac802154_scan_get_channel_time(request->interval,
request->wpan_phy->symbol_duration);
queue_delayed_work(local->mac_wq, &local->beacon_work, 0);
return 0;
}
| linux-master | net/mac802154/scan.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MAC commands interface
*
* Copyright 2007-2012 Siemens AG
*
* Written by:
* Sergey Lapin <[email protected]>
* Dmitry Eremin-Solenikov <[email protected]>
* Alexander Smirnov <[email protected]>
*/
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/cfg802154.h>
#include <net/mac802154.h>
#include "ieee802154_i.h"
#include "driver-ops.h"
static int mac802154_mlme_start_req(struct net_device *dev,
struct ieee802154_addr *addr,
u8 channel, u8 page,
u8 bcn_ord, u8 sf_ord,
u8 pan_coord, u8 blx,
u8 coord_realign)
{
struct ieee802154_llsec_params params;
int changed = 0;
ASSERT_RTNL();
BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
dev->ieee802154_ptr->pan_id = addr->pan_id;
dev->ieee802154_ptr->short_addr = addr->short_addr;
mac802154_dev_set_page_channel(dev, page, channel);
params.pan_id = addr->pan_id;
changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
changed |= IEEE802154_LLSEC_PARAM_HWADDR;
params.coord_hwaddr = params.hwaddr;
changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
params.coord_shortaddr = addr->short_addr;
changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
return mac802154_set_params(dev, ¶ms, changed);
}
static int mac802154_set_mac_params(struct net_device *dev,
const struct ieee802154_mac_params *params)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_local *local = sdata->local;
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
int ret;
ASSERT_RTNL();
/* PHY */
wpan_dev->wpan_phy->transmit_power = params->transmit_power;
wpan_dev->wpan_phy->cca = params->cca;
wpan_dev->wpan_phy->cca_ed_level = params->cca_ed_level;
/* MAC */
wpan_dev->min_be = params->min_be;
wpan_dev->max_be = params->max_be;
wpan_dev->csma_retries = params->csma_retries;
wpan_dev->frame_retries = params->frame_retries;
wpan_dev->lbt = params->lbt;
if (local->hw.phy->flags & WPAN_PHY_FLAG_TXPOWER) {
ret = drv_set_tx_power(local, params->transmit_power);
if (ret < 0)
return ret;
}
if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_MODE) {
ret = drv_set_cca_mode(local, ¶ms->cca);
if (ret < 0)
return ret;
}
if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
ret = drv_set_cca_ed_level(local, params->cca_ed_level);
if (ret < 0)
return ret;
}
return 0;
}
static void mac802154_get_mac_params(struct net_device *dev,
struct ieee802154_mac_params *params)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
ASSERT_RTNL();
/* PHY */
params->transmit_power = wpan_dev->wpan_phy->transmit_power;
params->cca = wpan_dev->wpan_phy->cca;
params->cca_ed_level = wpan_dev->wpan_phy->cca_ed_level;
/* MAC */
params->min_be = wpan_dev->min_be;
params->max_be = wpan_dev->max_be;
params->csma_retries = wpan_dev->csma_retries;
params->frame_retries = wpan_dev->frame_retries;
params->lbt = wpan_dev->lbt;
}
static const struct ieee802154_llsec_ops mac802154_llsec_ops = {
.get_params = mac802154_get_params,
.set_params = mac802154_set_params,
.add_key = mac802154_add_key,
.del_key = mac802154_del_key,
.add_dev = mac802154_add_dev,
.del_dev = mac802154_del_dev,
.add_devkey = mac802154_add_devkey,
.del_devkey = mac802154_del_devkey,
.add_seclevel = mac802154_add_seclevel,
.del_seclevel = mac802154_del_seclevel,
.lock_table = mac802154_lock_table,
.get_table = mac802154_get_table,
.unlock_table = mac802154_unlock_table,
};
struct ieee802154_mlme_ops mac802154_mlme_wpan = {
.start_req = mac802154_mlme_start_req,
.llsec = &mac802154_llsec_ops,
.set_mac_params = mac802154_set_mac_params,
.get_mac_params = mac802154_get_mac_params,
};
| linux-master | net/mac802154/mac_cmd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2012 Siemens AG
*
* Written by:
* Alexander Smirnov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/netlink.h>
#include <net/nl802154.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
#include <net/route.h>
#include <net/cfg802154.h>
#include "ieee802154_i.h"
#include "cfg.h"
static void ieee802154_tasklet_handler(struct tasklet_struct *t)
{
struct ieee802154_local *local = from_tasklet(local, t, tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue))) {
switch (skb->pkt_type) {
case IEEE802154_RX_MSG:
/* Clear skb->pkt_type in order to not confuse kernel
* netstack.
*/
skb->pkt_type = 0;
ieee802154_rx(local, skb);
break;
default:
WARN(1, "mac802154: Packet is of unknown type %d\n",
skb->pkt_type);
kfree_skb(skb);
break;
}
}
}
struct ieee802154_hw *
ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
{
struct wpan_phy *phy;
struct ieee802154_local *local;
size_t priv_size;
if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
!ops->start || !ops->stop || !ops->set_channel))
return NULL;
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wpan_phy priv data for both our ieee802154_local and for
* the driver's private data
*
* in memory it'll be like this:
*
* +-------------------------+
* | struct wpan_phy |
* +-------------------------+
* | struct ieee802154_local |
* +-------------------------+
* | driver's private data |
* +-------------------------+
*
* Because the ieee802154 layer isn't aware of the driver and MAC structures,
* let's align them here.
*/
priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;
phy = wpan_phy_new(&mac802154_config_ops, priv_size);
if (!phy) {
pr_err("failure to allocate master IEEE802.15.4 device\n");
return NULL;
}
phy->privid = mac802154_wpan_phy_privid;
local = wpan_phy_priv(phy);
local->phy = phy;
local->hw.phy = local->phy;
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
local->ops = ops;
INIT_LIST_HEAD(&local->interfaces);
INIT_LIST_HEAD(&local->rx_beacon_list);
INIT_LIST_HEAD(&local->rx_mac_cmd_list);
mutex_init(&local->iflist_mtx);
tasklet_setup(&local->tasklet, ieee802154_tasklet_handler);
skb_queue_head_init(&local->skb_queue);
INIT_WORK(&local->sync_tx_work, ieee802154_xmit_sync_worker);
INIT_DELAYED_WORK(&local->scan_work, mac802154_scan_worker);
INIT_WORK(&local->rx_beacon_work, mac802154_rx_beacon_worker);
INIT_DELAYED_WORK(&local->beacon_work, mac802154_beacon_worker);
INIT_WORK(&local->rx_mac_cmd_work, mac802154_rx_mac_cmd_worker);
/* init supported flags with 802.15.4 default ranges */
phy->supported.max_minbe = 8;
phy->supported.min_maxbe = 3;
phy->supported.max_maxbe = 8;
phy->supported.min_frame_retries = 0;
phy->supported.max_frame_retries = 7;
phy->supported.max_csma_backoffs = 5;
phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
/* always supported */
phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE) | BIT(NL802154_IFTYPE_COORD);
return &local->hw;
}
EXPORT_SYMBOL(ieee802154_alloc_hw);
void ieee802154_configure_durations(struct wpan_phy *phy,
unsigned int page, unsigned int channel)
{
u32 duration = 0;
switch (page) {
case 0:
if (BIT(channel) & 0x1)
/* 868 MHz BPSK 802.15.4-2003: 20 ksym/s */
duration = 50 * NSEC_PER_USEC;
else if (BIT(channel) & 0x7FE)
/* 915 MHz BPSK 802.15.4-2003: 40 ksym/s */
duration = 25 * NSEC_PER_USEC;
else if (BIT(channel) & 0x7FFF800)
/* 2400 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */
duration = 16 * NSEC_PER_USEC;
break;
case 2:
if (BIT(channel) & 0x1)
/* 868 MHz O-QPSK 802.15.4-2006: 25 ksym/s */
duration = 40 * NSEC_PER_USEC;
else if (BIT(channel) & 0x7FE)
/* 915 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */
duration = 16 * NSEC_PER_USEC;
break;
case 3:
if (BIT(channel) & 0x3FFF)
/* 2.4 GHz CSS 802.15.4a-2007: 1/6 Msym/s */
duration = 6 * NSEC_PER_USEC;
break;
default:
break;
}
if (!duration) {
pr_debug("Unknown PHY symbol duration\n");
return;
}
phy->symbol_duration = duration;
phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
}
EXPORT_SYMBOL(ieee802154_configure_durations);
void ieee802154_free_hw(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
BUG_ON(!list_empty(&local->interfaces));
mutex_destroy(&local->iflist_mtx);
wpan_phy_free(local->phy);
}
EXPORT_SYMBOL(ieee802154_free_hw);
static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
{
/* TODO warn on empty symbol_duration
* Should be done when all drivers set this value.
*/
wpan_phy->lifs_period =
(IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
wpan_phy->sifs_period =
(IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
}
int ieee802154_register_hw(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
char mac_wq_name[IFNAMSIZ + 10] = {};
struct net_device *dev;
int rc = -ENOSYS;
local->workqueue =
create_singlethread_workqueue(wpan_phy_name(local->phy));
if (!local->workqueue) {
rc = -ENOMEM;
goto out;
}
snprintf(mac_wq_name, IFNAMSIZ + 10, "%s-mac-cmds", wpan_phy_name(local->phy));
local->mac_wq = create_singlethread_workqueue(mac_wq_name);
if (!local->mac_wq) {
rc = -ENOMEM;
goto out_wq;
}
hrtimer_init(&local->ifs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
local->ifs_timer.function = ieee802154_xmit_ifs_timer;
wpan_phy_set_dev(local->phy, local->hw.parent);
ieee802154_setup_wpan_phy_pib(local->phy);
ieee802154_configure_durations(local->phy, local->phy->current_page,
local->phy->current_channel);
if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) {
local->phy->supported.min_csma_backoffs = 4;
local->phy->supported.max_csma_backoffs = 4;
local->phy->supported.min_maxbe = 5;
local->phy->supported.max_maxbe = 5;
local->phy->supported.min_minbe = 3;
local->phy->supported.max_minbe = 3;
}
if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
local->phy->supported.min_frame_retries = 3;
local->phy->supported.max_frame_retries = 3;
}
if (hw->flags & IEEE802154_HW_PROMISCUOUS)
local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR);
rc = wpan_phy_register(local->phy);
if (rc < 0)
goto out_mac_wq;
rtnl_lock();
dev = ieee802154_if_add(local, "wpan%d", NET_NAME_ENUM,
NL802154_IFTYPE_NODE,
cpu_to_le64(0x0000000000000000ULL));
if (IS_ERR(dev)) {
rtnl_unlock();
rc = PTR_ERR(dev);
goto out_phy;
}
rtnl_unlock();
return 0;
out_phy:
wpan_phy_unregister(local->phy);
out_mac_wq:
destroy_workqueue(local->mac_wq);
out_wq:
destroy_workqueue(local->workqueue);
out:
return rc;
}
EXPORT_SYMBOL(ieee802154_register_hw);
void ieee802154_unregister_hw(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
tasklet_kill(&local->tasklet);
flush_workqueue(local->workqueue);
rtnl_lock();
ieee802154_remove_interfaces(local);
rtnl_unlock();
destroy_workqueue(local->mac_wq);
destroy_workqueue(local->workqueue);
wpan_phy_unregister(local->phy);
}
EXPORT_SYMBOL(ieee802154_unregister_hw);
static int __init ieee802154_init(void)
{
return ieee802154_iface_init();
}
static void __exit ieee802154_exit(void)
{
ieee802154_iface_exit();
rcu_barrier();
}
subsys_initcall(ieee802154_init);
module_exit(ieee802154_exit);
MODULE_DESCRIPTION("IEEE 802.15.4 subsystem");
MODULE_LICENSE("GPL v2");
| linux-master | net/mac802154/main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2007-2012 Siemens AG
*
* Written by:
* Dmitry Eremin-Solenikov <[email protected]>
* Sergey Lapin <[email protected]>
* Maxim Gorbachyov <[email protected]>
* Alexander Smirnov <[email protected]>
*/
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>
#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>
#include "ieee802154_i.h"
#include "driver-ops.h"
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
struct ieee802154_local *local =
container_of(work, struct ieee802154_local, sync_tx_work);
struct sk_buff *skb = local->tx_skb;
struct net_device *dev = skb->dev;
int res;
res = drv_xmit_sync(local, skb);
if (res)
goto err_tx;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
ieee802154_xmit_complete(&local->hw, skb, false);
return;
err_tx:
/* Restart the netif queue on each sub_if_data object. */
ieee802154_release_queue(local);
if (atomic_dec_and_test(&local->phy->ongoing_txs))
wake_up(&local->phy->sync_txq);
kfree_skb(skb);
netdev_dbg(dev, "transmission failed\n");
}
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
int ret;
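/* Append the 16-bit FCS (CRC-CCITT) in software unless the transceiver
 * adds the checksum itself.
 */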
if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
struct sk_buff *nskb;
u16 crc;
if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
GFP_ATOMIC);
if (likely(nskb)) {
consume_skb(skb);
skb = nskb;
} else {
goto err_free_skb;
}
}
crc = crc_ccitt(0, skb->data, skb->len);
put_unaligned_le16(crc, skb_put(skb, 2));
}
/* Stop the netif queue on each sub_if_data object. */
ieee802154_hold_queue(local);
atomic_inc(&local->phy->ongoing_txs);
/* Drivers should preferably implement the async callback. In some rare
* cases they only provide a sync callback which we will use as a
* fallback.
*/
if (local->ops->xmit_async) {
unsigned int len = skb->len;
ret = drv_xmit_async(local, skb);
if (ret)
goto err_wake_netif_queue;
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
} else {
local->tx_skb = skb;
queue_work(local->workqueue, &local->sync_tx_work);
}
return NETDEV_TX_OK;
err_wake_netif_queue:
ieee802154_release_queue(local);
if (atomic_dec_and_test(&local->phy->ongoing_txs))
wake_up(&local->phy->sync_txq);
err_free_skb:
kfree_skb(skb);
return NETDEV_TX_OK;
}
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
int ret;
ieee802154_hold_queue(local);
ieee802154_disable_queue(local);
wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
ret = local->tx_result;
ieee802154_release_queue(local);
return ret;
}
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
int ret;
ieee802154_hold_queue(local);
ret = ieee802154_sync_queue(local);
set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
return ret;
}
int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
return ieee802154_sync_and_hold_queue(local);
}
int ieee802154_mlme_tx_locked(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb)
{
/* Avoid possible calls to ->ndo_stop() when we asynchronously perform
* MLME transmissions.
*/
ASSERT_RTNL();
/* Ensure the device was not stopped, otherwise error out */
if (!local->open_count)
return -ENETDOWN;
/* Warn if the ieee802154 core thinks MLME frames can be sent while the
* net interface expects this cannot happen.
*/
if (WARN_ON_ONCE(!netif_running(sdata->dev)))
return -ENETDOWN;
ieee802154_tx(local, skb);
return ieee802154_sync_queue(local);
}
int ieee802154_mlme_tx(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb)
{
int ret;
rtnl_lock();
ret = ieee802154_mlme_tx_locked(local, sdata, skb);
rtnl_unlock();
return ret;
}
void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
ieee802154_release_queue(local);
}
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb)
{
int ret;
ieee802154_mlme_op_pre(local);
ret = ieee802154_mlme_tx(local, sdata, skb);
ieee802154_mlme_op_post(local);
return ret;
}
int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb)
{
int ret;
ieee802154_mlme_op_pre(local);
ret = ieee802154_mlme_tx_locked(local, sdata, skb);
ieee802154_mlme_op_post(local);
return ret;
}
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
{
return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
}
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
/* Warn if the net interface tries to transmit frames while the
* ieee802154 core assumes the queue is stopped.
*/
WARN_ON_ONCE(ieee802154_queue_is_stopped(local));
return ieee802154_tx(local, skb);
}
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
skb->skb_iif = dev->ifindex;
return ieee802154_hot_tx(sdata->local, skb);
}
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int rc;
/* TODO we should move it to wpan_dev_hard_header and dev_hard_header
* functions. The reason is that wireshark will show a MAC header with
* security fields while the payload is not encrypted.
*/
rc = mac802154_llsec_encrypt(&sdata->sec, skb);
if (rc) {
netdev_warn(dev, "encryption failed: %i\n", rc);
kfree_skb(skb);
return NETDEV_TX_OK;
}
skb->skb_iif = dev->ifindex;
return ieee802154_hot_tx(sdata->local, skb);
}
| linux-master | net/mac802154/tx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Trace points for transport security layer handshakes.
*
* Author: Chuck Lever <[email protected]>
*
* Copyright (c) 2023, Oracle and/or its affiliates.
*/
#include <linux/types.h>
#include <linux/ipv6.h>
#include <net/sock.h>
#include <net/inet_sock.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include "handshake.h"
#define CREATE_TRACE_POINTS
#include <trace/events/handshake.h>
| linux-master | net/handshake/trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Handshake request lifetime events
*
* Author: Chuck Lever <[email protected]>
*
* Copyright (c) 2023, Oracle and/or its affiliates.
*/
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/fdtable.h>
#include <linux/rhashtable.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <kunit/visibility.h>
#include <uapi/linux/handshake.h>
#include "handshake.h"
#include <trace/events/handshake.h>
/*
* We need both a handshake_req -> sock mapping, and a sock ->
* handshake_req mapping. Both are one-to-one.
*
* To avoid adding another pointer field to struct sock, net/handshake
* maintains a hash table, indexed by the memory address of @sock, to
* find the struct handshake_req outstanding for that socket. The
* reverse direction uses a simple pointer field in the handshake_req
* struct.
*/
static struct rhashtable handshake_rhashtbl ____cacheline_aligned_in_smp;
static const struct rhashtable_params handshake_rhash_params = {
.key_len = sizeof_field(struct handshake_req, hr_sk),
.key_offset = offsetof(struct handshake_req, hr_sk),
.head_offset = offsetof(struct handshake_req, hr_rhash),
.automatic_shrinking = true,
};
int handshake_req_hash_init(void)
{
return rhashtable_init(&handshake_rhashtbl, &handshake_rhash_params);
}
void handshake_req_hash_destroy(void)
{
rhashtable_destroy(&handshake_rhashtbl);
}
struct handshake_req *handshake_req_hash_lookup(struct sock *sk)
{
return rhashtable_lookup_fast(&handshake_rhashtbl, &sk,
handshake_rhash_params);
}
EXPORT_SYMBOL_IF_KUNIT(handshake_req_hash_lookup);
static bool handshake_req_hash_add(struct handshake_req *req)
{
int ret;
ret = rhashtable_lookup_insert_fast(&handshake_rhashtbl,
&req->hr_rhash,
handshake_rhash_params);
return ret == 0;
}
static void handshake_req_destroy(struct handshake_req *req)
{
if (req->hr_proto->hp_destroy)
req->hr_proto->hp_destroy(req);
rhashtable_remove_fast(&handshake_rhashtbl, &req->hr_rhash,
handshake_rhash_params);
kfree(req);
}
static void handshake_sk_destruct(struct sock *sk)
{
void (*sk_destruct)(struct sock *sk);
struct handshake_req *req;
req = handshake_req_hash_lookup(sk);
if (!req)
return;
trace_handshake_destruct(sock_net(sk), req, sk);
sk_destruct = req->hr_odestruct;
handshake_req_destroy(req);
if (sk_destruct)
sk_destruct(sk);
}
/**
* handshake_req_alloc - Allocate a handshake request
* @proto: security protocol
* @flags: memory allocation flags
*
* Returns an initialized handshake_req or NULL.
*/
struct handshake_req *handshake_req_alloc(const struct handshake_proto *proto,
gfp_t flags)
{
struct handshake_req *req;
if (!proto)
return NULL;
if (proto->hp_handler_class <= HANDSHAKE_HANDLER_CLASS_NONE)
return NULL;
if (proto->hp_handler_class >= HANDSHAKE_HANDLER_CLASS_MAX)
return NULL;
if (!proto->hp_accept || !proto->hp_done)
return NULL;
req = kzalloc(struct_size(req, hr_priv, proto->hp_privsize), flags);
if (!req)
return NULL;
INIT_LIST_HEAD(&req->hr_list);
req->hr_proto = proto;
return req;
}
EXPORT_SYMBOL(handshake_req_alloc);
/**
* handshake_req_private - Get per-handshake private data
* @req: handshake arguments
*
* Returns a pointer to the private data area allocated along with @req.
*/
void *handshake_req_private(struct handshake_req *req)
{
return (void *)&req->hr_priv;
}
EXPORT_SYMBOL(handshake_req_private);
static bool __add_pending_locked(struct handshake_net *hn,
struct handshake_req *req)
{
if (WARN_ON_ONCE(!list_empty(&req->hr_list)))
return false;
hn->hn_pending++;
list_add_tail(&req->hr_list, &hn->hn_requests);
return true;
}
static void __remove_pending_locked(struct handshake_net *hn,
struct handshake_req *req)
{
hn->hn_pending--;
list_del_init(&req->hr_list);
}
/*
* Returns %true if the request was found on @hn's pending list,
* otherwise %false.
*
* If @req was on a pending list, it has not yet been accepted.
*/
static bool remove_pending(struct handshake_net *hn, struct handshake_req *req)
{
bool ret = false;
spin_lock(&hn->hn_lock);
if (!list_empty(&req->hr_list)) {
__remove_pending_locked(hn, req);
ret = true;
}
spin_unlock(&hn->hn_lock);
return ret;
}
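/* Pop the oldest pending request that matches @class, or return NULL
 * if none is queued.
 */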
struct handshake_req *handshake_req_next(struct handshake_net *hn, int class)
{
struct handshake_req *req, *pos;
req = NULL;
spin_lock(&hn->hn_lock);
list_for_each_entry(pos, &hn->hn_requests, hr_list) {
if (pos->hr_proto->hp_handler_class != class)
continue;
__remove_pending_locked(hn, pos);
req = pos;
break;
}
spin_unlock(&hn->hn_lock);
return req;
}
EXPORT_SYMBOL_IF_KUNIT(handshake_req_next);
/**
* handshake_req_submit - Submit a handshake request
* @sock: open socket on which to perform the handshake
* @req: handshake arguments
* @flags: memory allocation flags
*
* Return values:
* %0: Request queued
* %-EINVAL: Invalid argument
* %-EBUSY: A handshake is already under way for this socket
* %-ESRCH: No handshake agent is available
* %-EAGAIN: Too many pending handshake requests
* %-ENOMEM: Failed to allocate memory
* %-EMSGSIZE: Failed to construct notification message
* %-EOPNOTSUPP: Handshake module not initialized
*
* A zero return value from handshake_req_submit() means that
* exactly one subsequent completion callback is guaranteed.
*
* A negative return value from handshake_req_submit() means that
* no completion callback will be done and that @req has been
* destroyed.
*/
int handshake_req_submit(struct socket *sock, struct handshake_req *req,
gfp_t flags)
{
struct handshake_net *hn;
struct net *net;
int ret;
if (!sock || !req || !sock->file) {
kfree(req);
return -EINVAL;
}
req->hr_sk = sock->sk;
if (!req->hr_sk) {
kfree(req);
return -EINVAL;
}
req->hr_odestruct = req->hr_sk->sk_destruct;
req->hr_sk->sk_destruct = handshake_sk_destruct;
ret = -EOPNOTSUPP;
net = sock_net(req->hr_sk);
hn = handshake_pernet(net);
if (!hn)
goto out_err;
ret = -EAGAIN;
if (READ_ONCE(hn->hn_pending) >= hn->hn_pending_max)
goto out_err;
spin_lock(&hn->hn_lock);
ret = -EOPNOTSUPP;
if (test_bit(HANDSHAKE_F_NET_DRAINING, &hn->hn_flags))
goto out_unlock;
ret = -EBUSY;
if (!handshake_req_hash_add(req))
goto out_unlock;
if (!__add_pending_locked(hn, req))
goto out_unlock;
spin_unlock(&hn->hn_lock);
ret = handshake_genl_notify(net, req->hr_proto, flags);
if (ret) {
trace_handshake_notify_err(net, req, req->hr_sk, ret);
if (remove_pending(hn, req))
goto out_err;
}
/* Prevent socket release while a handshake request is pending */
sock_hold(req->hr_sk);
trace_handshake_submit(net, req, req->hr_sk);
return 0;
out_unlock:
spin_unlock(&hn->hn_lock);
out_err:
trace_handshake_submit_err(net, req, req->hr_sk, ret);
handshake_req_destroy(req);
return ret;
}
EXPORT_SYMBOL(handshake_req_submit);
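/* Invoke the protocol's completion callback exactly once and drop the
 * socket reference taken at submission time.
 */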
void handshake_complete(struct handshake_req *req, unsigned int status,
struct genl_info *info)
{
struct sock *sk = req->hr_sk;
struct net *net = sock_net(sk);
if (!test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) {
trace_handshake_complete(net, req, sk, status);
req->hr_proto->hp_done(req, status, info);
/* Handshake request is no longer pending */
sock_put(sk);
}
}
EXPORT_SYMBOL_IF_KUNIT(handshake_complete);
/**
* handshake_req_cancel - Cancel an in-progress handshake
* @sk: socket on which there is an ongoing handshake
*
* Request cancellation races with request completion. To determine
* who won, callers examine the return value from this function.
*
* Return values:
* %true - Uncompleted handshake request was canceled
* %false - Handshake request already completed or not found
*/
bool handshake_req_cancel(struct sock *sk)
{
struct handshake_req *req;
struct handshake_net *hn;
struct net *net;
net = sock_net(sk);
req = handshake_req_hash_lookup(sk);
if (!req) {
trace_handshake_cancel_none(net, req, sk);
return false;
}
hn = handshake_pernet(net);
if (hn && remove_pending(hn, req)) {
/* Request hadn't been accepted */
goto out_true;
}
if (test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) {
/* Request already completed */
trace_handshake_cancel_busy(net, req, sk);
return false;
}
out_true:
trace_handshake_cancel(net, req, sk);
/* Handshake request is no longer pending */
sock_put(sk);
return true;
}
EXPORT_SYMBOL(handshake_req_cancel);
| linux-master | net/handshake/request.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Handle the TLS Alert protocol
*
* Author: Chuck Lever <[email protected]>
*
* Copyright (c) 2023, Oracle and/or its affiliates.
*/
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <net/sock.h>
#include <net/handshake.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include "handshake.h"
#include <trace/events/handshake.h>
/**
* tls_alert_send - send a TLS Alert on a kTLS socket
* @sock: open kTLS socket to send on
* @level: TLS Alert level
* @description: TLS Alert description
*
* Returns zero on success or a negative errno.
*/
int tls_alert_send(struct socket *sock, u8 level, u8 description)
{
u8 record_type = TLS_RECORD_TYPE_ALERT;
u8 buf[CMSG_SPACE(sizeof(record_type))];
struct msghdr msg = { 0 };
struct cmsghdr *cmsg;
struct kvec iov;
u8 alert[2];
int ret;
trace_tls_alert_send(sock->sk, level, description);
alert[0] = level;
alert[1] = description;
iov.iov_base = alert;
iov.iov_len = sizeof(alert);
memset(buf, 0, sizeof(buf));
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
msg.msg_flags = MSG_DONTWAIT;
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_TLS;
cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
memcpy(CMSG_DATA(cmsg), &record_type, sizeof(record_type));
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, iov.iov_len);
ret = sock_sendmsg(sock, &msg);
return ret < 0 ? ret : 0;
}
/**
* tls_get_record_type - Look for TLS RECORD_TYPE information
* @sk: socket (for IP address information)
* @cmsg: incoming message to be parsed
*
* Returns zero or a TLS_RECORD_TYPE value.
*/
u8 tls_get_record_type(const struct sock *sk, const struct cmsghdr *cmsg)
{
u8 record_type;
if (cmsg->cmsg_level != SOL_TLS)
return 0;
if (cmsg->cmsg_type != TLS_GET_RECORD_TYPE)
return 0;
record_type = *((u8 *)CMSG_DATA(cmsg));
trace_tls_contenttype(sk, record_type);
return record_type;
}
EXPORT_SYMBOL(tls_get_record_type);
/**
* tls_alert_recv - Parse TLS Alert messages
* @sk: socket (for IP address information)
* @msg: incoming message to be parsed
* @level: OUT - TLS AlertLevel value
* @description: OUT - TLS AlertDescription value
*
*/
void tls_alert_recv(const struct sock *sk, const struct msghdr *msg,
u8 *level, u8 *description)
{
const struct kvec *iov;
u8 *data;
iov = msg->msg_iter.kvec;
data = iov->iov_base;
*level = data[0];
*description = data[1];
trace_tls_alert_recv(sk, *level, *description);
}
EXPORT_SYMBOL(tls_alert_recv);
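/*
 * Example (illustrative sketch only): a kernel read path might combine
 * these helpers roughly as below. The wrapper name is hypothetical; the
 * cmsg-buffer pattern mirrors what in-kernel kTLS consumers use.
 */
static int example_tls_read(struct socket *sock, void *buf, size_t len)
{
	union {
		struct cmsghdr	cmsg;
		u8		buf[CMSG_SPACE(sizeof(u8))];
	} u;
	struct msghdr msg = { .msg_control = &u, .msg_controllen = sizeof(u) };
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	u8 level, description;
	int ret;

	ret = kernel_recvmsg(sock, &msg, &iov, 1, len, MSG_DONTWAIT);
	if (ret < 0 || msg.msg_controllen == sizeof(u))
		return ret;	/* error, or no record-type cmsg attached */

	switch (tls_get_record_type(sock->sk, &u.cmsg)) {
	case 0:
	case TLS_RECORD_TYPE_DATA:
		return ret;	/* ordinary application data */
	case TLS_RECORD_TYPE_ALERT:
		tls_alert_recv(sock->sk, &msg, &level, &description);
		return description == TLS_ALERT_DESC_CLOSE_NOTIFY ?
			-ENOTCONN : -EPROTO;
	default:
		return -EPROTO;	/* unexpected record type */
	}
}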
| linux-master | net/handshake/alert.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Oracle and/or its affiliates.
*
* KUnit test of the handshake upcall mechanism.
*/
#include <kunit/test.h>
#include <kunit/visibility.h>
#include <linux/kernel.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <uapi/linux/handshake.h>
#include "handshake.h"
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
static int test_accept_func(struct handshake_req *req, struct genl_info *info,
int fd)
{
return 0;
}
static void test_done_func(struct handshake_req *req, unsigned int status,
struct genl_info *info)
{
}
struct handshake_req_alloc_test_param {
const char *desc;
struct handshake_proto *proto;
gfp_t gfp;
bool expect_success;
};
static struct handshake_proto handshake_req_alloc_proto_2 = {
.hp_handler_class = HANDSHAKE_HANDLER_CLASS_NONE,
};
static struct handshake_proto handshake_req_alloc_proto_3 = {
.hp_handler_class = HANDSHAKE_HANDLER_CLASS_MAX,
};
static struct handshake_proto handshake_req_alloc_proto_4 = {
.hp_handler_class = HANDSHAKE_HANDLER_CLASS_TLSHD,
};
static struct handshake_proto handshake_req_alloc_proto_5 = {
.hp_handler_class = HANDSHAKE_HANDLER_CLASS_TLSHD,
.hp_accept = test_accept_func,
};
static struct handshake_proto handshake_req_alloc_proto_6 = {
.hp_handler_class = HANDSHAKE_HANDLER_CLASS_TLSHD,
.hp_privsize = UINT_MAX,
.hp_accept = test_accept_func,
.hp_done = test_done_func,
};
static struct handshake_proto handshake_req_alloc_proto_good = {
.hp_handler_class = HANDSHAKE_HANDLER_CLASS_TLSHD,
.hp_accept = test_accept_func,
.hp_done = test_done_func,
};
static const
struct handshake_req_alloc_test_param handshake_req_alloc_params[] = {
{
.desc = "handshake_req_alloc NULL proto",
.proto = NULL,
.gfp = GFP_KERNEL,
.expect_success = false,
},
{
.desc = "handshake_req_alloc CLASS_NONE",
.proto = &handshake_req_alloc_proto_2,
.gfp = GFP_KERNEL,
.expect_success = false,
},
{
.desc = "handshake_req_alloc CLASS_MAX",
.proto = &handshake_req_alloc_proto_3,
.gfp = GFP_KERNEL,
.expect_success = false,
},
{
.desc = "handshake_req_alloc no callbacks",
.proto = &handshake_req_alloc_proto_4,
.gfp = GFP_KERNEL,
.expect_success = false,
},
{
.desc = "handshake_req_alloc no done callback",
.proto = &handshake_req_alloc_proto_5,
.gfp = GFP_KERNEL,
.expect_success = false,
},
{
.desc = "handshake_req_alloc excessive privsize",
.proto = &handshake_req_alloc_proto_6,
.gfp = GFP_KERNEL | __GFP_NOWARN,
.expect_success = false,
},
{
.desc = "handshake_req_alloc all good",
.proto = &handshake_req_alloc_proto_good,
.gfp = GFP_KERNEL,
.expect_success = true,
},
};
static void
handshake_req_alloc_get_desc(const struct handshake_req_alloc_test_param *param,
char *desc)
{
strscpy(desc, param->desc, KUNIT_PARAM_DESC_SIZE);
}
/* Creates the function handshake_req_alloc_gen_params */
KUNIT_ARRAY_PARAM(handshake_req_alloc, handshake_req_alloc_params,
handshake_req_alloc_get_desc);
static void handshake_req_alloc_case(struct kunit *test)
{
const struct handshake_req_alloc_test_param *param = test->param_value;
struct handshake_req *result;
/* Arrange */
/* Act */
result = handshake_req_alloc(param->proto, param->gfp);
/* Assert */
if (param->expect_success)
KUNIT_EXPECT_NOT_NULL(test, result);
else
KUNIT_EXPECT_NULL(test, result);
kfree(result);
}
static void handshake_req_submit_test1(struct kunit *test)
{
struct socket *sock;
int err, result;
/* Arrange */
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
/* Act */
result = handshake_req_submit(sock, NULL, GFP_KERNEL);
/* Assert */
KUNIT_EXPECT_EQ(test, result, -EINVAL);
sock_release(sock);
}
static void handshake_req_submit_test2(struct kunit *test)
{
struct handshake_req *req;
int result;
/* Arrange */
req = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req);
/* Act */
result = handshake_req_submit(NULL, req, GFP_KERNEL);
/* Assert */
KUNIT_EXPECT_EQ(test, result, -EINVAL);
/* handshake_req_submit() destroys @req on error */
}
static void handshake_req_submit_test3(struct kunit *test)
{
struct handshake_req *req;
struct socket *sock;
int err, result;
/* Arrange */
req = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req);
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
sock->file = NULL;
/* Act */
result = handshake_req_submit(sock, req, GFP_KERNEL);
/* Assert */
KUNIT_EXPECT_EQ(test, result, -EINVAL);
/* handshake_req_submit() destroys @req on error */
sock_release(sock);
}
static void handshake_req_submit_test4(struct kunit *test)
{
struct handshake_req *req, *result;
struct socket *sock;
struct file *filp;
int err;
/* Arrange */
req = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req);
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
KUNIT_ASSERT_NOT_NULL(test, sock->sk);
sock->file = filp;
err = handshake_req_submit(sock, req, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, err, 0);
/* Act */
result = handshake_req_hash_lookup(sock->sk);
/* Assert */
KUNIT_EXPECT_NOT_NULL(test, result);
KUNIT_EXPECT_PTR_EQ(test, req, result);
handshake_req_cancel(sock->sk);
fput(filp);
}
static void handshake_req_submit_test5(struct kunit *test)
{
struct handshake_req *req;
struct handshake_net *hn;
struct socket *sock;
struct file *filp;
struct net *net;
int saved, err;
/* Arrange */
req = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req);
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
KUNIT_ASSERT_NOT_NULL(test, sock->sk);
sock->file = filp;
net = sock_net(sock->sk);
hn = handshake_pernet(net);
KUNIT_ASSERT_NOT_NULL(test, hn);
saved = hn->hn_pending;
hn->hn_pending = hn->hn_pending_max + 1;
/* Act */
err = handshake_req_submit(sock, req, GFP_KERNEL);
/* Assert */
KUNIT_EXPECT_EQ(test, err, -EAGAIN);
fput(filp);
hn->hn_pending = saved;
}
static void handshake_req_submit_test6(struct kunit *test)
{
struct handshake_req *req1, *req2;
struct socket *sock;
struct file *filp;
int err;
/* Arrange */
req1 = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req1);
req2 = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req2);
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
KUNIT_ASSERT_NOT_NULL(test, sock->sk);
sock->file = filp;
/* Act */
err = handshake_req_submit(sock, req1, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, err, 0);
err = handshake_req_submit(sock, req2, GFP_KERNEL);
/* Assert */
KUNIT_EXPECT_EQ(test, err, -EBUSY);
handshake_req_cancel(sock->sk);
fput(filp);
}
static void handshake_req_cancel_test1(struct kunit *test)
{
struct handshake_req *req;
struct socket *sock;
struct file *filp;
bool result;
int err;
/* Arrange */
req = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req);
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
sock->file = filp;
err = handshake_req_submit(sock, req, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, err, 0);
/* NB: handshake_req hasn't been accepted */
/* Act */
result = handshake_req_cancel(sock->sk);
/* Assert */
KUNIT_EXPECT_TRUE(test, result);
fput(filp);
}
static void handshake_req_cancel_test2(struct kunit *test)
{
struct handshake_req *req, *next;
struct handshake_net *hn;
struct socket *sock;
struct file *filp;
struct net *net;
bool result;
int err;
/* Arrange */
req = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req);
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
sock->file = filp;
err = handshake_req_submit(sock, req, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, err, 0);
net = sock_net(sock->sk);
hn = handshake_pernet(net);
KUNIT_ASSERT_NOT_NULL(test, hn);
/* Pretend to accept this request */
next = handshake_req_next(hn, HANDSHAKE_HANDLER_CLASS_TLSHD);
KUNIT_ASSERT_PTR_EQ(test, req, next);
/* Act */
result = handshake_req_cancel(sock->sk);
/* Assert */
KUNIT_EXPECT_TRUE(test, result);
fput(filp);
}
static void handshake_req_cancel_test3(struct kunit *test)
{
struct handshake_req *req, *next;
struct handshake_net *hn;
struct socket *sock;
struct file *filp;
struct net *net;
bool result;
int err;
/* Arrange */
req = handshake_req_alloc(&handshake_req_alloc_proto_good, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req);
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
sock->file = filp;
err = handshake_req_submit(sock, req, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, err, 0);
net = sock_net(sock->sk);
hn = handshake_pernet(net);
KUNIT_ASSERT_NOT_NULL(test, hn);
/* Pretend to accept this request */
next = handshake_req_next(hn, HANDSHAKE_HANDLER_CLASS_TLSHD);
KUNIT_ASSERT_PTR_EQ(test, req, next);
/* Pretend to complete this request */
handshake_complete(next, -ETIMEDOUT, NULL);
/* Act */
result = handshake_req_cancel(sock->sk);
/* Assert */
KUNIT_EXPECT_FALSE(test, result);
fput(filp);
}
static struct handshake_req *handshake_req_destroy_test;
static void test_destroy_func(struct handshake_req *req)
{
handshake_req_destroy_test = req;
}
static struct handshake_proto handshake_req_alloc_proto_destroy = {
.hp_handler_class = HANDSHAKE_HANDLER_CLASS_TLSHD,
.hp_accept = test_accept_func,
.hp_done = test_done_func,
.hp_destroy = test_destroy_func,
};
static void handshake_req_destroy_test1(struct kunit *test)
{
struct handshake_req *req;
struct socket *sock;
struct file *filp;
int err;
/* Arrange */
handshake_req_destroy_test = NULL;
req = handshake_req_alloc(&handshake_req_alloc_proto_destroy, GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, req);
err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
&sock, 1);
KUNIT_ASSERT_EQ(test, err, 0);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
sock->file = filp;
err = handshake_req_submit(sock, req, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, err, 0);
handshake_req_cancel(sock->sk);
/* Act */
fput(filp);
/* Assert */
KUNIT_EXPECT_PTR_EQ(test, handshake_req_destroy_test, req);
}
static struct kunit_case handshake_api_test_cases[] = {
{
.name = "req_alloc API fuzzing",
.run_case = handshake_req_alloc_case,
.generate_params = handshake_req_alloc_gen_params,
},
{
.name = "req_submit NULL req arg",
.run_case = handshake_req_submit_test1,
},
{
.name = "req_submit NULL sock arg",
.run_case = handshake_req_submit_test2,
},
{
.name = "req_submit NULL sock->file",
.run_case = handshake_req_submit_test3,
},
{
.name = "req_lookup works",
.run_case = handshake_req_submit_test4,
},
{
.name = "req_submit max pending",
.run_case = handshake_req_submit_test5,
},
{
.name = "req_submit multiple",
.run_case = handshake_req_submit_test6,
},
{
.name = "req_cancel before accept",
.run_case = handshake_req_cancel_test1,
},
{
.name = "req_cancel after accept",
.run_case = handshake_req_cancel_test2,
},
{
.name = "req_cancel after done",
.run_case = handshake_req_cancel_test3,
},
{
.name = "req_destroy works",
.run_case = handshake_req_destroy_test1,
},
{}
};
static struct kunit_suite handshake_api_suite = {
.name = "Handshake API tests",
.test_cases = handshake_api_test_cases,
};
kunit_test_suites(&handshake_api_suite);
MODULE_DESCRIPTION("Test handshake upcall API functions");
MODULE_LICENSE("GPL");
| linux-master | net/handshake/handshake-test.c |
// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/handshake.yaml */
/* YNL-GEN kernel source */
#include <net/netlink.h>
#include <net/genetlink.h>
#include "genl.h"
#include <uapi/linux/handshake.h>
/* HANDSHAKE_CMD_ACCEPT - do */
static const struct nla_policy handshake_accept_nl_policy[HANDSHAKE_A_ACCEPT_HANDLER_CLASS + 1] = {
[HANDSHAKE_A_ACCEPT_HANDLER_CLASS] = NLA_POLICY_MAX(NLA_U32, 2),
};
/* HANDSHAKE_CMD_DONE - do */
static const struct nla_policy handshake_done_nl_policy[HANDSHAKE_A_DONE_REMOTE_AUTH + 1] = {
[HANDSHAKE_A_DONE_STATUS] = { .type = NLA_U32, },
[HANDSHAKE_A_DONE_SOCKFD] = { .type = NLA_U32, },
[HANDSHAKE_A_DONE_REMOTE_AUTH] = { .type = NLA_U32, },
};
/* Ops table for handshake */
static const struct genl_split_ops handshake_nl_ops[] = {
{
.cmd = HANDSHAKE_CMD_ACCEPT,
.doit = handshake_nl_accept_doit,
.policy = handshake_accept_nl_policy,
.maxattr = HANDSHAKE_A_ACCEPT_HANDLER_CLASS,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
.cmd = HANDSHAKE_CMD_DONE,
.doit = handshake_nl_done_doit,
.policy = handshake_done_nl_policy,
.maxattr = HANDSHAKE_A_DONE_REMOTE_AUTH,
.flags = GENL_CMD_CAP_DO,
},
};
static const struct genl_multicast_group handshake_nl_mcgrps[] = {
[HANDSHAKE_NLGRP_NONE] = { "none", },
[HANDSHAKE_NLGRP_TLSHD] = { "tlshd", },
};
struct genl_family handshake_nl_family __ro_after_init = {
.name = HANDSHAKE_FAMILY_NAME,
.version = HANDSHAKE_FAMILY_VERSION,
.netnsok = true,
.parallel_ops = true,
.module = THIS_MODULE,
.split_ops = handshake_nl_ops,
.n_split_ops = ARRAY_SIZE(handshake_nl_ops),
.mcgrps = handshake_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(handshake_nl_mcgrps),
};
| linux-master | net/handshake/genl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic netlink handshake service
*
* Author: Chuck Lever <[email protected]>
*
* Copyright (c) 2023, Oracle and/or its affiliates.
*/
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <kunit/visibility.h>
#include <uapi/linux/handshake.h>
#include "handshake.h"
#include "genl.h"
#include <trace/events/handshake.h>
/**
* handshake_genl_notify - Notify handlers that a request is waiting
* @net: target network namespace
* @proto: handshake protocol
* @flags: memory allocation control flags
*
* Returns zero on success or a negative errno if notification failed.
*/
int handshake_genl_notify(struct net *net, const struct handshake_proto *proto,
gfp_t flags)
{
struct sk_buff *msg;
void *hdr;
/* Disable notifications during unit testing */
if (!test_bit(HANDSHAKE_F_PROTO_NOTIFY, &proto->hp_flags))
return 0;
if (!genl_has_listeners(&handshake_nl_family, net,
proto->hp_handler_class))
return -ESRCH;
msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, flags);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &handshake_nl_family, 0,
HANDSHAKE_CMD_READY);
if (!hdr)
goto out_free;
if (nla_put_u32(msg, HANDSHAKE_A_ACCEPT_HANDLER_CLASS,
proto->hp_handler_class) < 0) {
genlmsg_cancel(msg, hdr);
goto out_free;
}
genlmsg_end(msg, hdr);
return genlmsg_multicast_netns(&handshake_nl_family, net, msg,
0, proto->hp_handler_class, flags);
out_free:
nlmsg_free(msg);
return -EMSGSIZE;
}
/**
* handshake_genl_put - Create a generic netlink message header
* @msg: buffer in which to create the header
* @info: generic netlink message context
*
* Returns a ready-to-use header, or NULL.
*/
struct nlmsghdr *handshake_genl_put(struct sk_buff *msg,
struct genl_info *info)
{
return genlmsg_put(msg, info->snd_portid, info->snd_seq,
&handshake_nl_family, 0, info->genlhdr->cmd);
}
EXPORT_SYMBOL(handshake_genl_put);
/*
* dup() a kernel socket for use as a user space file descriptor
* in the current process. The kernel socket must have an
 * instantiated struct file.
*
* Implicit argument: "current()"
*/
static int handshake_dup(struct socket *sock)
{
struct file *file;
int newfd;
file = get_file(sock->file);
newfd = get_unused_fd_flags(O_CLOEXEC);
if (newfd < 0) {
fput(file);
return newfd;
}
fd_install(newfd, file);
return newfd;
}
int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
struct handshake_net *hn = handshake_pernet(net);
struct handshake_req *req = NULL;
struct socket *sock;
int class, fd, err;
err = -EOPNOTSUPP;
if (!hn)
goto out_status;
err = -EINVAL;
if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_ACCEPT_HANDLER_CLASS))
goto out_status;
class = nla_get_u32(info->attrs[HANDSHAKE_A_ACCEPT_HANDLER_CLASS]);
err = -EAGAIN;
req = handshake_req_next(hn, class);
if (!req)
goto out_status;
sock = req->hr_sk->sk_socket;
fd = handshake_dup(sock);
if (fd < 0) {
err = fd;
goto out_complete;
}
err = req->hr_proto->hp_accept(req, info, fd);
if (err) {
fput(sock->file);
goto out_complete;
}
trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
return 0;
out_complete:
handshake_complete(req, -EIO, NULL);
out_status:
trace_handshake_cmd_accept_err(net, req, NULL, err);
return err;
}
int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
struct handshake_req *req;
struct socket *sock;
int fd, status, err;
if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD))
return -EINVAL;
fd = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_SOCKFD]);
sock = sockfd_lookup(fd, &err);
if (!sock)
return err;
req = handshake_req_hash_lookup(sock->sk);
if (!req) {
err = -EBUSY;
trace_handshake_cmd_done_err(net, req, sock->sk, err);
fput(sock->file);
return err;
}
trace_handshake_cmd_done(net, req, sock->sk, fd);
status = -EIO;
if (info->attrs[HANDSHAKE_A_DONE_STATUS])
status = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_STATUS]);
handshake_complete(req, status, info);
fput(sock->file);
return 0;
}
static unsigned int handshake_net_id;
static int __net_init handshake_net_init(struct net *net)
{
struct handshake_net *hn = net_generic(net, handshake_net_id);
unsigned long tmp;
struct sysinfo si;
/*
* Arbitrary limit to prevent handshakes that do not make
* progress from clogging up the system. The cap scales up
* with the amount of physical memory on the system.
*/
si_meminfo(&si);
tmp = si.totalram / (25 * si.mem_unit);
hn->hn_pending_max = clamp(tmp, 3UL, 50UL);
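	/*
	 * For example, with 4 GiB of RAM and 4 KiB pages, si.totalram is
	 * roughly 1,048,576 units, so tmp = 1048576 / (25 * 4096) = 10
	 * and the clamp above leaves hn_pending_max at 10.
	 */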
spin_lock_init(&hn->hn_lock);
hn->hn_pending = 0;
hn->hn_flags = 0;
INIT_LIST_HEAD(&hn->hn_requests);
return 0;
}
static void __net_exit handshake_net_exit(struct net *net)
{
struct handshake_net *hn = net_generic(net, handshake_net_id);
struct handshake_req *req;
LIST_HEAD(requests);
/*
* Drain the net's pending list. Requests that have been
* accepted and are in progress will be destroyed when
* the socket is closed.
*/
spin_lock(&hn->hn_lock);
set_bit(HANDSHAKE_F_NET_DRAINING, &hn->hn_flags);
	list_splice_init(&hn->hn_requests, &requests);
spin_unlock(&hn->hn_lock);
while (!list_empty(&requests)) {
req = list_first_entry(&requests, struct handshake_req, hr_list);
list_del(&req->hr_list);
/*
* Requests on this list have not yet been
* accepted, so they do not have an fd to put.
*/
handshake_complete(req, -ETIMEDOUT, NULL);
}
}
static struct pernet_operations handshake_genl_net_ops = {
.init = handshake_net_init,
.exit = handshake_net_exit,
.id = &handshake_net_id,
.size = sizeof(struct handshake_net),
};
/**
* handshake_pernet - Get the handshake private per-net structure
* @net: network namespace
*
* Returns a pointer to the net's private per-net structure for the
* handshake module, or NULL if handshake_init() failed.
*/
struct handshake_net *handshake_pernet(struct net *net)
{
return handshake_net_id ?
net_generic(net, handshake_net_id) : NULL;
}
EXPORT_SYMBOL_IF_KUNIT(handshake_pernet);
static int __init handshake_init(void)
{
int ret;
ret = handshake_req_hash_init();
if (ret) {
pr_warn("handshake: hash initialization failed (%d)\n", ret);
return ret;
}
ret = genl_register_family(&handshake_nl_family);
if (ret) {
pr_warn("handshake: netlink registration failed (%d)\n", ret);
handshake_req_hash_destroy();
return ret;
}
/*
* ORDER: register_pernet_subsys must be done last.
*
* If initialization does not make it past pernet_subsys
* registration, then handshake_net_id will remain 0. That
* shunts the handshake consumer API to return ENOTSUPP
* to prevent it from dereferencing something that hasn't
* been allocated.
*/
ret = register_pernet_subsys(&handshake_genl_net_ops);
if (ret) {
pr_warn("handshake: pernet registration failed (%d)\n", ret);
genl_unregister_family(&handshake_nl_family);
handshake_req_hash_destroy();
}
return ret;
}
static void __exit handshake_exit(void)
{
unregister_pernet_subsys(&handshake_genl_net_ops);
handshake_net_id = 0;
handshake_req_hash_destroy();
genl_unregister_family(&handshake_nl_family);
}
module_init(handshake_init);
module_exit(handshake_exit);
| linux-master | net/handshake/netlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Establish a TLS session for a kernel socket consumer
* using the tlshd user space handler.
*
* Author: Chuck Lever <[email protected]>
*
* Copyright (c) 2021-2023, Oracle and/or its affiliates.
*/
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/key.h>
#include <net/sock.h>
#include <net/handshake.h>
#include <net/genetlink.h>
#include <net/tls_prot.h>
#include <uapi/linux/keyctl.h>
#include <uapi/linux/handshake.h>
#include "handshake.h"
struct tls_handshake_req {
void (*th_consumer_done)(void *data, int status,
key_serial_t peerid);
void *th_consumer_data;
int th_type;
unsigned int th_timeout_ms;
int th_auth_mode;
const char *th_peername;
key_serial_t th_keyring;
key_serial_t th_certificate;
key_serial_t th_privkey;
unsigned int th_num_peerids;
key_serial_t th_peerid[5];
};
static struct tls_handshake_req *
tls_handshake_req_init(struct handshake_req *req,
const struct tls_handshake_args *args)
{
struct tls_handshake_req *treq = handshake_req_private(req);
treq->th_timeout_ms = args->ta_timeout_ms;
treq->th_consumer_done = args->ta_done;
treq->th_consumer_data = args->ta_data;
treq->th_peername = args->ta_peername;
treq->th_keyring = args->ta_keyring;
treq->th_num_peerids = 0;
treq->th_certificate = TLS_NO_CERT;
treq->th_privkey = TLS_NO_PRIVKEY;
return treq;
}
static void tls_handshake_remote_peerids(struct tls_handshake_req *treq,
struct genl_info *info)
{
struct nlattr *head = nlmsg_attrdata(info->nlhdr, GENL_HDRLEN);
int rem, len = nlmsg_attrlen(info->nlhdr, GENL_HDRLEN);
struct nlattr *nla;
unsigned int i;
i = 0;
nla_for_each_attr(nla, head, len, rem) {
if (nla_type(nla) == HANDSHAKE_A_DONE_REMOTE_AUTH)
i++;
}
if (!i)
return;
treq->th_num_peerids = min_t(unsigned int, i,
ARRAY_SIZE(treq->th_peerid));
i = 0;
nla_for_each_attr(nla, head, len, rem) {
if (nla_type(nla) == HANDSHAKE_A_DONE_REMOTE_AUTH)
treq->th_peerid[i++] = nla_get_u32(nla);
if (i >= treq->th_num_peerids)
break;
}
}
/**
* tls_handshake_done - callback to handle a CMD_DONE request
* @req: socket on which the handshake was performed
* @status: session status code
* @info: full results of session establishment
*
*/
static void tls_handshake_done(struct handshake_req *req,
unsigned int status, struct genl_info *info)
{
struct tls_handshake_req *treq = handshake_req_private(req);
treq->th_peerid[0] = TLS_NO_PEERID;
if (info)
tls_handshake_remote_peerids(treq, info);
if (!status)
set_bit(HANDSHAKE_F_REQ_SESSION, &req->hr_flags);
treq->th_consumer_done(treq->th_consumer_data, -status,
treq->th_peerid[0]);
}
#if IS_ENABLED(CONFIG_KEYS)
static int tls_handshake_private_keyring(struct tls_handshake_req *treq)
{
key_ref_t process_keyring_ref, keyring_ref;
int ret;
if (treq->th_keyring == TLS_NO_KEYRING)
return 0;
process_keyring_ref = lookup_user_key(KEY_SPEC_PROCESS_KEYRING,
KEY_LOOKUP_CREATE,
KEY_NEED_WRITE);
if (IS_ERR(process_keyring_ref)) {
ret = PTR_ERR(process_keyring_ref);
goto out;
}
keyring_ref = lookup_user_key(treq->th_keyring, KEY_LOOKUP_CREATE,
KEY_NEED_LINK);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto out_put_key;
}
ret = key_link(key_ref_to_ptr(process_keyring_ref),
key_ref_to_ptr(keyring_ref));
key_ref_put(keyring_ref);
out_put_key:
key_ref_put(process_keyring_ref);
out:
return ret;
}
#else
static int tls_handshake_private_keyring(struct tls_handshake_req *treq)
{
return 0;
}
#endif
static int tls_handshake_put_peer_identity(struct sk_buff *msg,
struct tls_handshake_req *treq)
{
unsigned int i;
for (i = 0; i < treq->th_num_peerids; i++)
if (nla_put_u32(msg, HANDSHAKE_A_ACCEPT_PEER_IDENTITY,
treq->th_peerid[i]) < 0)
return -EMSGSIZE;
return 0;
}
static int tls_handshake_put_certificate(struct sk_buff *msg,
struct tls_handshake_req *treq)
{
struct nlattr *entry_attr;
if (treq->th_certificate == TLS_NO_CERT &&
treq->th_privkey == TLS_NO_PRIVKEY)
return 0;
entry_attr = nla_nest_start(msg, HANDSHAKE_A_ACCEPT_CERTIFICATE);
if (!entry_attr)
return -EMSGSIZE;
if (nla_put_u32(msg, HANDSHAKE_A_X509_CERT,
treq->th_certificate) ||
nla_put_u32(msg, HANDSHAKE_A_X509_PRIVKEY,
treq->th_privkey)) {
nla_nest_cancel(msg, entry_attr);
return -EMSGSIZE;
}
nla_nest_end(msg, entry_attr);
return 0;
}
/**
* tls_handshake_accept - callback to construct a CMD_ACCEPT response
* @req: handshake parameters to return
* @info: generic netlink message context
* @fd: file descriptor to be returned
*
* Returns zero on success, or a negative errno on failure.
*/
static int tls_handshake_accept(struct handshake_req *req,
struct genl_info *info, int fd)
{
struct tls_handshake_req *treq = handshake_req_private(req);
struct nlmsghdr *hdr;
struct sk_buff *msg;
int ret;
ret = tls_handshake_private_keyring(treq);
if (ret < 0)
goto out;
ret = -ENOMEM;
msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out;
hdr = handshake_genl_put(msg, info);
if (!hdr)
goto out_cancel;
ret = -EMSGSIZE;
ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_SOCKFD, fd);
if (ret < 0)
goto out_cancel;
ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, treq->th_type);
if (ret < 0)
goto out_cancel;
if (treq->th_peername) {
ret = nla_put_string(msg, HANDSHAKE_A_ACCEPT_PEERNAME,
treq->th_peername);
if (ret < 0)
goto out_cancel;
}
if (treq->th_timeout_ms) {
ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_TIMEOUT, treq->th_timeout_ms);
if (ret < 0)
goto out_cancel;
}
ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_AUTH_MODE,
treq->th_auth_mode);
if (ret < 0)
goto out_cancel;
switch (treq->th_auth_mode) {
case HANDSHAKE_AUTH_PSK:
ret = tls_handshake_put_peer_identity(msg, treq);
if (ret < 0)
goto out_cancel;
break;
case HANDSHAKE_AUTH_X509:
ret = tls_handshake_put_certificate(msg, treq);
if (ret < 0)
goto out_cancel;
break;
}
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
out_cancel:
genlmsg_cancel(msg, hdr);
out:
return ret;
}
static const struct handshake_proto tls_handshake_proto = {
.hp_handler_class = HANDSHAKE_HANDLER_CLASS_TLSHD,
.hp_privsize = sizeof(struct tls_handshake_req),
.hp_flags = BIT(HANDSHAKE_F_PROTO_NOTIFY),
.hp_accept = tls_handshake_accept,
.hp_done = tls_handshake_done,
};
/**
* tls_client_hello_anon - request an anonymous TLS handshake on a socket
* @args: socket and handshake parameters for this request
* @flags: memory allocation control flags
*
* Return values:
 * %0: Handshake request enqueued; ->done will be called when complete
* %-ESRCH: No user agent is available
* %-ENOMEM: Memory allocation failed
*/
int tls_client_hello_anon(const struct tls_handshake_args *args, gfp_t flags)
{
struct tls_handshake_req *treq;
struct handshake_req *req;
req = handshake_req_alloc(&tls_handshake_proto, flags);
if (!req)
return -ENOMEM;
treq = tls_handshake_req_init(req, args);
treq->th_type = HANDSHAKE_MSG_TYPE_CLIENTHELLO;
treq->th_auth_mode = HANDSHAKE_AUTH_UNAUTH;
return handshake_req_submit(args->ta_sock, req, flags);
}
EXPORT_SYMBOL(tls_client_hello_anon);
/**
* tls_client_hello_x509 - request an x.509-based TLS handshake on a socket
* @args: socket and handshake parameters for this request
* @flags: memory allocation control flags
*
* Return values:
 * %0: Handshake request enqueued; ->done will be called when complete
* %-ESRCH: No user agent is available
* %-ENOMEM: Memory allocation failed
*/
int tls_client_hello_x509(const struct tls_handshake_args *args, gfp_t flags)
{
struct tls_handshake_req *treq;
struct handshake_req *req;
req = handshake_req_alloc(&tls_handshake_proto, flags);
if (!req)
return -ENOMEM;
treq = tls_handshake_req_init(req, args);
treq->th_type = HANDSHAKE_MSG_TYPE_CLIENTHELLO;
treq->th_auth_mode = HANDSHAKE_AUTH_X509;
treq->th_certificate = args->ta_my_cert;
treq->th_privkey = args->ta_my_privkey;
return handshake_req_submit(args->ta_sock, req, flags);
}
EXPORT_SYMBOL(tls_client_hello_x509);
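/*
 * Example (illustrative sketch only): a kernel consumer might fill in
 * struct tls_handshake_args and request a client-side x.509 handshake
 * roughly like this. The callback, wrapper, peername, and timeout are
 * hypothetical; the certificate and private key serials would come from
 * keys the consumer already holds.
 */
static void example_done(void *data, int status, key_serial_t peerid)
{
	/* e.g. wake up the waiter recorded in @data */
}

static int example_start_tls(struct socket *sock, key_serial_t cert,
			     key_serial_t privkey)
{
	struct tls_handshake_args args = {
		.ta_sock	= sock,
		.ta_done	= example_done,
		.ta_data	= sock,
		.ta_peername	= "server.example.com",
		.ta_timeout_ms	= 15000,
		.ta_keyring	= TLS_NO_KEYRING,
		.ta_my_cert	= cert,
		.ta_my_privkey	= privkey,
	};

	return tls_client_hello_x509(&args, GFP_KERNEL);
}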
/**
* tls_client_hello_psk - request a PSK-based TLS handshake on a socket
* @args: socket and handshake parameters for this request
* @flags: memory allocation control flags
*
* Return values:
 * %0: Handshake request enqueued; ->done will be called when complete
* %-EINVAL: Wrong number of local peer IDs
* %-ESRCH: No user agent is available
* %-ENOMEM: Memory allocation failed
*/
int tls_client_hello_psk(const struct tls_handshake_args *args, gfp_t flags)
{
struct tls_handshake_req *treq;
struct handshake_req *req;
unsigned int i;
if (!args->ta_num_peerids ||
args->ta_num_peerids > ARRAY_SIZE(treq->th_peerid))
return -EINVAL;
req = handshake_req_alloc(&tls_handshake_proto, flags);
if (!req)
return -ENOMEM;
treq = tls_handshake_req_init(req, args);
treq->th_type = HANDSHAKE_MSG_TYPE_CLIENTHELLO;
treq->th_auth_mode = HANDSHAKE_AUTH_PSK;
treq->th_num_peerids = args->ta_num_peerids;
for (i = 0; i < args->ta_num_peerids; i++)
treq->th_peerid[i] = args->ta_my_peerids[i];
return handshake_req_submit(args->ta_sock, req, flags);
}
EXPORT_SYMBOL(tls_client_hello_psk);
/**
* tls_server_hello_x509 - request a server TLS handshake on a socket
* @args: socket and handshake parameters for this request
* @flags: memory allocation control flags
*
* Return values:
 * %0: Handshake request enqueued; ->done will be called when complete
* %-ESRCH: No user agent is available
* %-ENOMEM: Memory allocation failed
*/
int tls_server_hello_x509(const struct tls_handshake_args *args, gfp_t flags)
{
struct tls_handshake_req *treq;
struct handshake_req *req;
req = handshake_req_alloc(&tls_handshake_proto, flags);
if (!req)
return -ENOMEM;
treq = tls_handshake_req_init(req, args);
treq->th_type = HANDSHAKE_MSG_TYPE_SERVERHELLO;
treq->th_auth_mode = HANDSHAKE_AUTH_X509;
treq->th_certificate = args->ta_my_cert;
treq->th_privkey = args->ta_my_privkey;
return handshake_req_submit(args->ta_sock, req, flags);
}
EXPORT_SYMBOL(tls_server_hello_x509);
/**
* tls_server_hello_psk - request a server TLS handshake on a socket
* @args: socket and handshake parameters for this request
* @flags: memory allocation control flags
*
* Return values:
 * %0: Handshake request enqueued; ->done will be called when complete
* %-ESRCH: No user agent is available
* %-ENOMEM: Memory allocation failed
*/
int tls_server_hello_psk(const struct tls_handshake_args *args, gfp_t flags)
{
struct tls_handshake_req *treq;
struct handshake_req *req;
req = handshake_req_alloc(&tls_handshake_proto, flags);
if (!req)
return -ENOMEM;
treq = tls_handshake_req_init(req, args);
treq->th_type = HANDSHAKE_MSG_TYPE_SERVERHELLO;
treq->th_auth_mode = HANDSHAKE_AUTH_PSK;
treq->th_num_peerids = 1;
treq->th_peerid[0] = args->ta_my_peerids[0];
return handshake_req_submit(args->ta_sock, req, flags);
}
EXPORT_SYMBOL(tls_server_hello_psk);
/**
* tls_handshake_cancel - cancel a pending handshake
* @sk: socket on which there is an ongoing handshake
*
* Request cancellation races with request completion. To determine
* who won, callers examine the return value from this function.
*
* Return values:
* %true - Uncompleted handshake request was canceled
* %false - Handshake request already completed or not found
*/
bool tls_handshake_cancel(struct sock *sk)
{
return handshake_req_cancel(sk);
}
EXPORT_SYMBOL(tls_handshake_cancel);
/**
* tls_handshake_close - send a Closure alert
* @sock: an open socket
*
*/
void tls_handshake_close(struct socket *sock)
{
struct handshake_req *req;
req = handshake_req_hash_lookup(sock->sk);
if (!req)
return;
if (!test_and_clear_bit(HANDSHAKE_F_REQ_SESSION, &req->hr_flags))
return;
tls_alert_send(sock, TLS_ALERT_LEVEL_WARNING,
TLS_ALERT_DESC_CLOSE_NOTIFY);
}
EXPORT_SYMBOL(tls_handshake_close);
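/*
 * Example (illustrative sketch only): typical teardown order for a
 * consumer-owned socket. tls_handshake_cancel() stops waiting for a
 * still-pending handshake; tls_handshake_close() notifies the peer if a
 * session was established. The wrapper itself is hypothetical.
 */
static void example_sock_close(struct socket *sock)
{
	tls_handshake_cancel(sock->sk);
	tls_handshake_close(sock);
	sock_release(sock);
}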
| linux-master | net/handshake/tlshd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NET3: Support for 802.2 demultiplexing off Ethernet
*
* Demultiplex 802.2 encoded protocols. We match the entry by the
* SSAP/DSAP pair and then deliver to the registered datalink that
* matches. The control byte is ignored and handling of such items
* is up to the routine passed the frame.
*
* Unlike the 802.3 datalink we have a list of 802.2 entries as
* there are multiple protocols to demux. The list is currently
* short (3 or 4 entries at most). The current demux assumes this.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/datalink.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/init.h>
#include <net/llc.h>
#include <net/p8022.h>
static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
const unsigned char *dest)
{
llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
return 0;
}
struct datalink_proto *register_8022_client(unsigned char type,
int (*func)(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt,
struct net_device *orig_dev))
{
struct datalink_proto *proto;
proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
if (proto) {
proto->type[0] = type;
proto->header_length = 3;
proto->request = p8022_request;
proto->sap = llc_sap_open(type, func);
if (!proto->sap) {
kfree(proto);
proto = NULL;
}
}
return proto;
}
void unregister_8022_client(struct datalink_proto *proto)
{
llc_sap_put(proto->sap);
kfree(proto);
}
EXPORT_SYMBOL(register_8022_client);
EXPORT_SYMBOL(unregister_8022_client);
MODULE_LICENSE("GPL");
| linux-master | net/802/p8022.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* FDDI-type device handling.
*
* Version: @(#)fddi.c 1.0.0 08/12/96
*
* Authors: Lawrence V. Stefani, <[email protected]>
*
* fddi.c is based on previous eth.c and tr.c work by
* Ross Biro
* Fred N. van Kempen, <[email protected]>
* Mark Evans, <[email protected]>
* Florian La Roche, <[email protected]>
* Alan Cox, <[email protected]>
*
* Changes
* Alan Cox : New arp/rebuild header
* Maciej W. Rozycki : IPv6 support
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/fddidevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <net/arp.h>
#include <net/sock.h>
/*
* Create the FDDI MAC header for an arbitrary protocol layer
*
* saddr=NULL means use device source address
* daddr=NULL means leave destination address (eg unresolved arp)
*/
static int fddi_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
int hl = FDDI_K_SNAP_HLEN;
struct fddihdr *fddi;
if(type != ETH_P_IP && type != ETH_P_IPV6 && type != ETH_P_ARP)
hl=FDDI_K_8022_HLEN-3;
fddi = skb_push(skb, hl);
fddi->fc = FDDI_FC_K_ASYNC_LLC_DEF;
if(type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
{
fddi->hdr.llc_snap.dsap = FDDI_EXTENDED_SAP;
fddi->hdr.llc_snap.ssap = FDDI_EXTENDED_SAP;
fddi->hdr.llc_snap.ctrl = FDDI_UI_CMD;
fddi->hdr.llc_snap.oui[0] = 0x00;
fddi->hdr.llc_snap.oui[1] = 0x00;
fddi->hdr.llc_snap.oui[2] = 0x00;
fddi->hdr.llc_snap.ethertype = htons(type);
}
/* Set the source and destination hardware addresses */
if (saddr != NULL)
memcpy(fddi->saddr, saddr, dev->addr_len);
else
memcpy(fddi->saddr, dev->dev_addr, dev->addr_len);
if (daddr != NULL)
{
memcpy(fddi->daddr, daddr, dev->addr_len);
return hl;
}
return -hl;
}
/*
* Determine the packet's protocol ID and fill in skb fields.
* This routine is called before an incoming packet is passed
* up. It's used to fill in specific skb fields and to set
* the proper pointer to the start of packet data (skb->data).
*/
__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct fddihdr *fddi = (struct fddihdr *)skb->data;
__be16 type;
/*
* Set mac.raw field to point to FC byte, set data field to point
* to start of packet data. Assume 802.2 SNAP frames for now.
*/
skb->dev = dev;
skb_reset_mac_header(skb); /* point to frame control (FC) */
if(fddi->hdr.llc_8022_1.dsap==0xe0)
{
skb_pull(skb, FDDI_K_8022_HLEN-3);
type = htons(ETH_P_802_2);
}
else
{
skb_pull(skb, FDDI_K_SNAP_HLEN); /* adjust for 21 byte header */
type=fddi->hdr.llc_snap.ethertype;
}
/* Set packet type based on destination address and flag settings */
if (*fddi->daddr & 0x01)
{
if (memcmp(fddi->daddr, dev->broadcast, FDDI_K_ALEN) == 0)
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
}
else if (dev->flags & IFF_PROMISC)
{
if (memcmp(fddi->daddr, dev->dev_addr, FDDI_K_ALEN))
skb->pkt_type = PACKET_OTHERHOST;
}
/* Assume 802.2 SNAP frames, for now */
return type;
}
EXPORT_SYMBOL(fddi_type_trans);
static const struct header_ops fddi_header_ops = {
.create = fddi_header,
};
static void fddi_setup(struct net_device *dev)
{
dev->header_ops = &fddi_header_ops;
dev->type = ARPHRD_FDDI;
dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */
dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */
dev->min_mtu = FDDI_K_SNAP_HLEN;
dev->max_mtu = FDDI_K_SNAP_DLEN;
dev->addr_len = FDDI_K_ALEN;
dev->tx_queue_len = 100; /* Long queues on FDDI */
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
}
/**
* alloc_fddidev - Register FDDI device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this FDDI device
*
* Fill in the fields of the device structure with FDDI-generic values.
*
* Constructs a new net device, complete with a private data area of
* size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_fddidev(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "fddi%d", NET_NAME_UNKNOWN,
fddi_setup);
}
EXPORT_SYMBOL(alloc_fddidev);
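/*
 * Example (illustrative sketch only): a driver probe path would
 * typically pair alloc_fddidev() with register_netdev(). The private
 * structure and probe function below are hypothetical.
 */
struct example_fddi_priv {
	int unit;
};

static struct net_device *example_fddi_probe(void)
{
	struct net_device *dev;

	dev = alloc_fddidev(sizeof(struct example_fddi_priv));
	if (!dev)
		return NULL;
	/* the driver fills in dev->netdev_ops and dev->dev_addr here */
	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}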
MODULE_LICENSE("GPL");
| linux-master | net/802/fddi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SNAP data link layer. Derived from 802.2
*
* Alan Cox <[email protected]>,
* from the 802.2 layer by Greg Page.
* Merged in additions from Greg Page's psnap.c.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/datalink.h>
#include <net/llc.h>
#include <net/psnap.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/rculist.h>
static LIST_HEAD(snap_list);
static DEFINE_SPINLOCK(snap_lock);
static struct llc_sap *snap_sap;
/*
* Find a snap client by matching the 5 bytes.
*/
static struct datalink_proto *find_snap_client(const unsigned char *desc)
{
struct datalink_proto *proto = NULL, *p;
list_for_each_entry_rcu(p, &snap_list, node, lockdep_is_held(&snap_lock)) {
if (!memcmp(p->type, desc, 5)) {
proto = p;
break;
}
}
return proto;
}
/*
* A SNAP packet has arrived
*/
static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
int rc = 1;
struct datalink_proto *proto;
static struct packet_type snap_packet_type = {
.type = cpu_to_be16(ETH_P_SNAP),
};
if (unlikely(!pskb_may_pull(skb, 5)))
goto drop;
rcu_read_lock();
proto = find_snap_client(skb_transport_header(skb));
if (proto) {
/* Pass the frame on. */
skb->transport_header += 5;
skb_pull_rcsum(skb, 5);
rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
}
rcu_read_unlock();
if (unlikely(!proto))
goto drop;
out:
return rc;
drop:
kfree_skb(skb);
goto out;
}
/*
* Put a SNAP header on a frame and pass to 802.2
*/
static int snap_request(struct datalink_proto *dl,
struct sk_buff *skb, const u8 *dest)
{
memcpy(skb_push(skb, 5), dl->type, 5);
llc_build_and_send_ui_pkt(snap_sap, skb, dest, snap_sap->laddr.lsap);
return 0;
}
/*
* Set up the SNAP layer
*/
EXPORT_SYMBOL(register_snap_client);
EXPORT_SYMBOL(unregister_snap_client);
static const char snap_err_msg[] __initconst =
KERN_CRIT "SNAP - unable to register with 802.2\n";
static int __init snap_init(void)
{
snap_sap = llc_sap_open(0xAA, snap_rcv);
if (!snap_sap) {
printk(snap_err_msg);
return -EBUSY;
}
return 0;
}
module_init(snap_init);
static void __exit snap_exit(void)
{
llc_sap_put(snap_sap);
}
module_exit(snap_exit);
/*
* Register SNAP clients. We don't yet use this for IP.
*/
struct datalink_proto *register_snap_client(const unsigned char *desc,
int (*rcvfunc)(struct sk_buff *,
struct net_device *,
struct packet_type *,
struct net_device *))
{
struct datalink_proto *proto = NULL;
spin_lock_bh(&snap_lock);
if (find_snap_client(desc))
goto out;
proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
if (proto) {
memcpy(proto->type, desc, 5);
proto->rcvfunc = rcvfunc;
proto->header_length = 5 + 3; /* snap + 802.2 */
proto->request = snap_request;
list_add_rcu(&proto->node, &snap_list);
}
out:
spin_unlock_bh(&snap_lock);
return proto;
}
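/*
 * Example (illustrative sketch only): a protocol that wants frames for
 * one OUI + EtherType pair passes the 5-byte descriptor and a receive
 * handler. The descriptor and handler here are purely illustrative.
 */
static int example_snap_rcv(struct sk_buff *skb, struct net_device *dev,
			    struct packet_type *pt,
			    struct net_device *orig_dev)
{
	kfree_skb(skb);
	return 0;
}

static struct datalink_proto *example_snap_register(void)
{
	static const unsigned char desc[5] = { 0x00, 0x00, 0x00, 0x81, 0x37 };

	return register_snap_client(desc, example_snap_rcv);
}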
/*
* Unregister SNAP clients. Protocols no longer want to play with us ...
*/
void unregister_snap_client(struct datalink_proto *proto)
{
spin_lock_bh(&snap_lock);
list_del_rcu(&proto->node);
spin_unlock_bh(&snap_lock);
synchronize_net();
kfree(proto);
}
MODULE_LICENSE("GPL");
| linux-master | net/802/psnap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* HIPPI-type device handling.
*
* Version: @(#)hippi.c 1.0.0 05/29/97
*
* Authors: Ross Biro
* Fred N. van Kempen, <[email protected]>
* Mark Evans, <[email protected]>
* Florian La Roche, <[email protected]>
* Alan Cox, <[email protected]>
* Jes Sorensen, <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <net/arp.h>
#include <net/sock.h>
#include <linux/uaccess.h>
/*
* Create the HIPPI MAC header for an arbitrary protocol layer
*
* saddr=NULL means use device source address
* daddr=NULL means leave destination address (eg unresolved arp)
*/
static int hippi_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
struct hippi_hdr *hip = skb_push(skb, HIPPI_HLEN);
struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
if (!len){
len = skb->len - HIPPI_HLEN;
printk("hippi_header(): length not supplied\n");
}
/*
* Due to the stupidity of the little endian byte-order we
* have to set the fp field this way.
*/
hip->fp.fixed = htonl(0x04800018);
hip->fp.d2_size = htonl(len + 8);
hip->le.fc = 0;
hip->le.double_wide = 0; /* only HIPPI 800 for the time being */
hip->le.message_type = 0; /* Data PDU */
hip->le.dest_addr_type = 2; /* 12 bit SC address */
hip->le.src_addr_type = 2; /* 12 bit SC address */
memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3);
memset_startat(&hip->le, 0, reserved);
hip->snap.dsap = HIPPI_EXTENDED_SAP;
hip->snap.ssap = HIPPI_EXTENDED_SAP;
hip->snap.ctrl = HIPPI_UI_CMD;
hip->snap.oui[0] = 0x00;
hip->snap.oui[1] = 0x00;
hip->snap.oui[2] = 0x00;
hip->snap.ethertype = htons(type);
if (daddr)
{
memcpy(hip->le.dest_switch_addr, daddr + 3, 3);
memcpy(&hcb->ifield, daddr + 2, 4);
return HIPPI_HLEN;
}
hcb->ifield = 0;
return -((int)HIPPI_HLEN);
}
/*
* Determine the packet's protocol ID.
*/
__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct hippi_hdr *hip;
/*
* This is actually wrong ... question is if we really should
* set the raw address here.
*/
skb->dev = dev;
skb_reset_mac_header(skb);
hip = (struct hippi_hdr *)skb_mac_header(skb);
skb_pull(skb, HIPPI_HLEN);
/*
* No fancy promisc stuff here now.
*/
return hip->snap.ethertype;
}
EXPORT_SYMBOL(hippi_type_trans);
/*
* For HIPPI we will actually use the lower 4 bytes of the hardware
* address as the I-FIELD rather than the actual hardware address.
*/
int hippi_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
if (netif_running(dev))
return -EBUSY;
dev_addr_set(dev, addr->sa_data);
return 0;
}
EXPORT_SYMBOL(hippi_mac_addr);
int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
/* Never send broadcast/multicast ARP messages */
NEIGH_VAR_INIT(p, MCAST_PROBES, 0);
/* In IPv6 unicast probes are valid even on NBMA,
* because they are encapsulated in normal IPv6 protocol.
* Should be a generic flag.
*/
if (p->tbl->family != AF_INET6)
NEIGH_VAR_INIT(p, UCAST_PROBES, 0);
return 0;
}
EXPORT_SYMBOL(hippi_neigh_setup_dev);
static const struct header_ops hippi_header_ops = {
.create = hippi_header,
};
static void hippi_setup(struct net_device *dev)
{
dev->header_ops = &hippi_header_ops;
/*
* We don't support HIPPI `ARP' for the time being, and probably
* never will unless someone else implements it. However we
* still need a fake ARPHRD to make ifconfig and friends play ball.
*/
dev->type = ARPHRD_HIPPI;
dev->hard_header_len = HIPPI_HLEN;
dev->mtu = 65280;
dev->min_mtu = 68;
dev->max_mtu = 65280;
dev->addr_len = HIPPI_ALEN;
dev->tx_queue_len = 25 /* 5 */;
memset(dev->broadcast, 0xFF, HIPPI_ALEN);
/*
* HIPPI doesn't support broadcast+multicast and we only use
* static ARP tables. ARP is disabled by hippi_neigh_setup_dev.
*/
dev->flags = 0;
}
/**
* alloc_hippi_dev - Register HIPPI device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this HIPPI device
*
* Fill in the fields of the device structure with HIPPI-generic values.
*
* Constructs a new net device, complete with a private data area of
* size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_hippi_dev(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "hip%d", NET_NAME_UNKNOWN,
hippi_setup);
}
EXPORT_SYMBOL(alloc_hippi_dev);
| linux-master | net/802/hippi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE 802.1D Generic Attribute Registration Protocol (GARP)
*
* Copyright (c) 2008 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/llc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/garp.h>
#include <asm/unaligned.h>
static unsigned int garp_join_time __read_mostly = 200;
module_param(garp_join_time, uint, 0644);
MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)");
MODULE_LICENSE("GPL");
static const struct garp_state_trans {
u8 state;
u8 action;
} garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = {
[GARP_APPLICANT_VA] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA,
.action = GARP_ACTION_S_JOIN_IN },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AA },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
},
[GARP_APPLICANT_AA] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA,
.action = GARP_ACTION_S_JOIN_IN },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
},
[GARP_APPLICANT_QA] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
},
[GARP_APPLICANT_LA] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_VO,
.action = GARP_ACTION_S_LEAVE_EMPTY },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_LA },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_LA },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_LA },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
},
[GARP_APPLICANT_VP] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA,
.action = GARP_ACTION_S_JOIN_IN },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AP },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_VO },
},
[GARP_APPLICANT_AP] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA,
.action = GARP_ACTION_S_JOIN_IN },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_AO },
},
[GARP_APPLICANT_QP] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_QO },
},
[GARP_APPLICANT_VO] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AO },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
},
[GARP_APPLICANT_AO] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_AP },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
},
[GARP_APPLICANT_QO] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_QP },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
},
};
static int garp_attr_cmp(const struct garp_attr *attr,
const void *data, u8 len, u8 type)
{
if (attr->type != type)
return attr->type - type;
if (attr->dlen != len)
return attr->dlen - len;
return memcmp(attr->data, data, len);
}
static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
const void *data, u8 len, u8 type)
{
struct rb_node *parent = app->gid.rb_node;
struct garp_attr *attr;
int d;
while (parent) {
attr = rb_entry(parent, struct garp_attr, node);
d = garp_attr_cmp(attr, data, len, type);
if (d > 0)
parent = parent->rb_left;
else if (d < 0)
parent = parent->rb_right;
else
return attr;
}
return NULL;
}
static struct garp_attr *garp_attr_create(struct garp_applicant *app,
const void *data, u8 len, u8 type)
{
struct rb_node *parent = NULL, **p = &app->gid.rb_node;
struct garp_attr *attr;
int d;
while (*p) {
parent = *p;
attr = rb_entry(parent, struct garp_attr, node);
d = garp_attr_cmp(attr, data, len, type);
if (d > 0)
p = &parent->rb_left;
else if (d < 0)
p = &parent->rb_right;
else {
/* The attribute already exists; re-use it. */
return attr;
}
}
attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
if (!attr)
return attr;
attr->state = GARP_APPLICANT_VO;
attr->type = type;
attr->dlen = len;
memcpy(attr->data, data, len);
rb_link_node(&attr->node, parent, p);
rb_insert_color(&attr->node, &app->gid);
return attr;
}
static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
{
rb_erase(&attr->node, &app->gid);
kfree(attr);
}
static void garp_attr_destroy_all(struct garp_applicant *app)
{
struct rb_node *node, *next;
struct garp_attr *attr;
for (node = rb_first(&app->gid);
next = node ? rb_next(node) : NULL, node != NULL;
node = next) {
attr = rb_entry(node, struct garp_attr, node);
garp_attr_destroy(app, attr);
}
}
static int garp_pdu_init(struct garp_applicant *app)
{
struct sk_buff *skb;
struct garp_pdu_hdr *gp;
#define LLC_RESERVE sizeof(struct llc_pdu_un)
skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
GFP_ATOMIC);
if (!skb)
return -ENOMEM;
skb->dev = app->dev;
skb->protocol = htons(ETH_P_802_2);
skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE);
gp = __skb_put(skb, sizeof(*gp));
put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol);
app->pdu = skb;
return 0;
}
static int garp_pdu_append_end_mark(struct garp_applicant *app)
{
if (skb_tailroom(app->pdu) < sizeof(u8))
return -1;
__skb_put_u8(app->pdu, GARP_END_MARK);
return 0;
}
static void garp_pdu_queue(struct garp_applicant *app)
{
if (!app->pdu)
return;
garp_pdu_append_end_mark(app);
garp_pdu_append_end_mark(app);
llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
LLC_SAP_BSPAN, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(app->pdu);
llc_mac_hdr_init(app->pdu, app->dev->dev_addr,
app->app->proto.group_address);
skb_queue_tail(&app->queue, app->pdu);
app->pdu = NULL;
}
static void garp_queue_xmit(struct garp_applicant *app)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&app->queue)))
dev_queue_xmit(skb);
}
static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype)
{
struct garp_msg_hdr *gm;
if (skb_tailroom(app->pdu) < sizeof(*gm))
return -1;
gm = __skb_put(app->pdu, sizeof(*gm));
gm->attrtype = attrtype;
garp_cb(app->pdu)->cur_type = attrtype;
return 0;
}
static int garp_pdu_append_attr(struct garp_applicant *app,
const struct garp_attr *attr,
enum garp_attr_event event)
{
struct garp_attr_hdr *ga;
unsigned int len;
int err;
again:
if (!app->pdu) {
err = garp_pdu_init(app);
if (err < 0)
return err;
}
if (garp_cb(app->pdu)->cur_type != attr->type) {
if (garp_cb(app->pdu)->cur_type &&
garp_pdu_append_end_mark(app) < 0)
goto queue;
if (garp_pdu_append_msg(app, attr->type) < 0)
goto queue;
}
len = sizeof(*ga) + attr->dlen;
if (skb_tailroom(app->pdu) < len)
goto queue;
ga = __skb_put(app->pdu, len);
ga->len = len;
ga->event = event;
memcpy(ga->data, attr->data, attr->dlen);
return 0;
queue:
garp_pdu_queue(app);
goto again;
}
static void garp_attr_event(struct garp_applicant *app,
struct garp_attr *attr, enum garp_event event)
{
enum garp_applicant_state state;
state = garp_applicant_state_table[attr->state][event].state;
if (state == GARP_APPLICANT_INVALID)
return;
switch (garp_applicant_state_table[attr->state][event].action) {
case GARP_ACTION_NONE:
break;
case GARP_ACTION_S_JOIN_IN:
/* When appending the attribute fails, don't update state in
* order to retry on next TRANSMIT_PDU event. */
if (garp_pdu_append_attr(app, attr, GARP_JOIN_IN) < 0)
return;
break;
case GARP_ACTION_S_LEAVE_EMPTY:
garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY);
/* As a pure applicant, sending a leave message implies that
* the attribute was unregistered and can be destroyed. */
garp_attr_destroy(app, attr);
return;
default:
WARN_ON(1);
}
attr->state = state;
}
int garp_request_join(const struct net_device *dev,
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
struct garp_port *port = rtnl_dereference(dev->garp_port);
struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
attr = garp_attr_create(app, data, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return -ENOMEM;
}
garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN);
spin_unlock_bh(&app->lock);
return 0;
}
EXPORT_SYMBOL_GPL(garp_request_join);
void garp_request_leave(const struct net_device *dev,
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
struct garp_port *port = rtnl_dereference(dev->garp_port);
struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
attr = garp_attr_lookup(app, data, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return;
}
garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE);
spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(garp_request_leave);
static void garp_gid_event(struct garp_applicant *app, enum garp_event event)
{
struct rb_node *node, *next;
struct garp_attr *attr;
for (node = rb_first(&app->gid);
next = node ? rb_next(node) : NULL, node != NULL;
node = next) {
attr = rb_entry(node, struct garp_attr, node);
garp_attr_event(app, attr, event);
}
}
static void garp_join_timer_arm(struct garp_applicant *app)
{
unsigned long delay;
delay = get_random_u32_below(msecs_to_jiffies(garp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
static void garp_join_timer(struct timer_list *t)
{
struct garp_applicant *app = from_timer(app, t, join_timer);
spin_lock(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
garp_pdu_queue(app);
spin_unlock(&app->lock);
garp_queue_xmit(app);
garp_join_timer_arm(app);
}
static int garp_pdu_parse_end_mark(struct sk_buff *skb)
{
if (!pskb_may_pull(skb, sizeof(u8)))
return -1;
if (*skb->data == GARP_END_MARK) {
skb_pull(skb, sizeof(u8));
return -1;
}
return 0;
}
static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb,
u8 attrtype)
{
const struct garp_attr_hdr *ga;
struct garp_attr *attr;
enum garp_event event;
unsigned int dlen;
if (!pskb_may_pull(skb, sizeof(*ga)))
return -1;
ga = (struct garp_attr_hdr *)skb->data;
if (ga->len < sizeof(*ga))
return -1;
if (!pskb_may_pull(skb, ga->len))
return -1;
skb_pull(skb, ga->len);
	dlen = ga->len - sizeof(*ga);
if (attrtype > app->app->maxattr)
return 0;
switch (ga->event) {
case GARP_LEAVE_ALL:
if (dlen != 0)
return -1;
garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY);
return 0;
case GARP_JOIN_EMPTY:
event = GARP_EVENT_R_JOIN_EMPTY;
break;
case GARP_JOIN_IN:
event = GARP_EVENT_R_JOIN_IN;
break;
case GARP_LEAVE_EMPTY:
event = GARP_EVENT_R_LEAVE_EMPTY;
break;
case GARP_EMPTY:
event = GARP_EVENT_R_EMPTY;
break;
default:
return 0;
}
if (dlen == 0)
return -1;
attr = garp_attr_lookup(app, ga->data, dlen, attrtype);
if (attr == NULL)
return 0;
garp_attr_event(app, attr, event);
return 0;
}
static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb)
{
const struct garp_msg_hdr *gm;
if (!pskb_may_pull(skb, sizeof(*gm)))
return -1;
gm = (struct garp_msg_hdr *)skb->data;
if (gm->attrtype == 0)
return -1;
skb_pull(skb, sizeof(*gm));
while (skb->len > 0) {
if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0)
return -1;
if (garp_pdu_parse_end_mark(skb) < 0)
break;
}
return 0;
}
static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
struct garp_application *appl = proto->data;
struct garp_port *port;
struct garp_applicant *app;
const struct garp_pdu_hdr *gp;
port = rcu_dereference(dev->garp_port);
if (!port)
goto err;
app = rcu_dereference(port->applicants[appl->type]);
if (!app)
goto err;
if (!pskb_may_pull(skb, sizeof(*gp)))
goto err;
gp = (struct garp_pdu_hdr *)skb->data;
if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID))
goto err;
skb_pull(skb, sizeof(*gp));
spin_lock(&app->lock);
while (skb->len > 0) {
if (garp_pdu_parse_msg(app, skb) < 0)
break;
if (garp_pdu_parse_end_mark(skb) < 0)
break;
}
spin_unlock(&app->lock);
err:
kfree_skb(skb);
}
static int garp_init_port(struct net_device *dev)
{
struct garp_port *port;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
rcu_assign_pointer(dev->garp_port, port);
return 0;
}
static void garp_release_port(struct net_device *dev)
{
struct garp_port *port = rtnl_dereference(dev->garp_port);
unsigned int i;
for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
if (rtnl_dereference(port->applicants[i]))
return;
}
RCU_INIT_POINTER(dev->garp_port, NULL);
kfree_rcu(port, rcu);
}
int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
{
struct garp_applicant *app;
int err;
ASSERT_RTNL();
if (!rtnl_dereference(dev->garp_port)) {
err = garp_init_port(dev);
if (err < 0)
goto err1;
}
err = -ENOMEM;
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
goto err2;
err = dev_mc_add(dev, appl->proto.group_address);
if (err < 0)
goto err3;
app->dev = dev;
app->app = appl;
app->gid = RB_ROOT;
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
timer_setup(&app->join_timer, garp_join_timer, 0);
garp_join_timer_arm(app);
return 0;
err3:
kfree(app);
err2:
garp_release_port(dev);
err1:
return err;
}
EXPORT_SYMBOL_GPL(garp_init_applicant);
void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
{
struct garp_port *port = rtnl_dereference(dev->garp_port);
struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
ASSERT_RTNL();
RCU_INIT_POINTER(port->applicants[appl->type], NULL);
/* Delete timer and generate a final TRANSMIT_PDU event to flush out
* all pending messages before the applicant is gone. */
timer_shutdown_sync(&app->join_timer);
spin_lock_bh(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
garp_attr_destroy_all(app);
garp_pdu_queue(app);
spin_unlock_bh(&app->lock);
garp_queue_xmit(app);
dev_mc_del(dev, appl->proto.group_address);
kfree_rcu(app, rcu);
garp_release_port(dev);
}
EXPORT_SYMBOL_GPL(garp_uninit_applicant);
int garp_register_application(struct garp_application *appl)
{
appl->proto.rcv = garp_pdu_rcv;
appl->proto.data = appl;
return stp_proto_register(&appl->proto);
}
EXPORT_SYMBOL_GPL(garp_register_application);
void garp_unregister_application(struct garp_application *appl)
{
stp_proto_unregister(&appl->proto);
}
EXPORT_SYMBOL_GPL(garp_unregister_application);
| linux-master | net/802/garp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NET3: Fibre Channel device handling subroutines
*
* Vineet Abraham <[email protected]>
* v 1.0 03/22/99
*/
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/fcdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/export.h>
#include <net/arp.h>
/*
* Put the headers on a Fibre Channel packet.
*/
static int fc_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
struct fch_hdr *fch;
int hdr_len;
	/*
	 *	Add the 802.2 SNAP header for IP and ARP, as the IPv4 code
	 *	calls dev->hard_header directly.
	 */
	if (type == ETH_P_IP || type == ETH_P_ARP) {
		struct fcllc *fcllc;
		hdr_len = sizeof(struct fch_hdr) + sizeof(struct fcllc);
		fch = skb_push(skb, hdr_len);
		fcllc = (struct fcllc *)(fch + 1);
		fcllc->dsap = fcllc->ssap = EXTENDED_SAP;
		fcllc->llc = UI_CMD;
		fcllc->protid[0] = fcllc->protid[1] = fcllc->protid[2] = 0x00;
		fcllc->ethertype = htons(type);
	} else {
		hdr_len = sizeof(struct fch_hdr);
		fch = skb_push(skb, hdr_len);
	}
	if (saddr)
		memcpy(fch->saddr, saddr, dev->addr_len);
	else
		memcpy(fch->saddr, dev->dev_addr, dev->addr_len);
	if (daddr) {
		memcpy(fch->daddr, daddr, dev->addr_len);
		return hdr_len;
	}
	return -hdr_len;
}
static const struct header_ops fc_header_ops = {
.create = fc_header,
};
static void fc_setup(struct net_device *dev)
{
dev->header_ops = &fc_header_ops;
dev->type = ARPHRD_IEEE802;
dev->hard_header_len = FC_HLEN;
dev->mtu = 2024;
dev->addr_len = FC_ALEN;
dev->tx_queue_len = 100; /* Long queues on fc */
dev->flags = IFF_BROADCAST;
memset(dev->broadcast, 0xFF, FC_ALEN);
}
/**
* alloc_fcdev - Register fibre channel device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this fibre channel device
*
* Fill in the fields of the device structure with fibre channel-generic values.
*
* Constructs a new net device, complete with a private data area of
* size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_fcdev(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "fc%d", NET_NAME_UNKNOWN, fc_setup);
}
EXPORT_SYMBOL(alloc_fcdev);
| linux-master | net/802/fc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE 802.1Q Multiple Registration Protocol (MRP)
*
* Copyright (c) 2012 Massachusetts Institute of Technology
*
* Adapted from code in net/802/garp.c
* Copyright (c) 2008 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <asm/unaligned.h>
static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
MODULE_LICENSE("GPL");
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
[MRP_APPLICANT_VO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
[MRP_EVENT_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_TX] = MRP_APPLICANT_VO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
},
[MRP_APPLICANT_VP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
[MRP_EVENT_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_TX] = MRP_APPLICANT_AA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
},
[MRP_APPLICANT_VN] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_AN,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
},
[MRP_APPLICANT_AN] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_AN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
},
[MRP_APPLICANT_AA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
},
[MRP_APPLICANT_QA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
},
[MRP_APPLICANT_LA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_VO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
},
[MRP_APPLICANT_AO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
[MRP_EVENT_LV] = MRP_APPLICANT_AO,
[MRP_EVENT_TX] = MRP_APPLICANT_AO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
},
[MRP_APPLICANT_QO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
[MRP_EVENT_LV] = MRP_APPLICANT_QO,
[MRP_EVENT_TX] = MRP_APPLICANT_QO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
},
[MRP_APPLICANT_AP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
[MRP_EVENT_LV] = MRP_APPLICANT_AO,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
},
[MRP_APPLICANT_QP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
[MRP_EVENT_LV] = MRP_APPLICANT_QO,
[MRP_EVENT_TX] = MRP_APPLICANT_QP,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
},
};
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
static void mrp_attrvalue_inc(void *value, u8 len)
{
u8 *v = (u8 *)value;
/* Add 1 to the last byte. If it becomes zero,
* go to the previous byte and repeat.
*/
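	/* Illustrative example (editorial note, not in the original source):
	 * for a two-byte value, {0x00, 0xff} increments to {0x01, 0x00}, and
	 * {0xff, 0xff} wraps around to {0x00, 0x00}.
	 */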
while (len > 0 && !++v[--len])
;
}
static int mrp_attr_cmp(const struct mrp_attr *attr,
const void *value, u8 len, u8 type)
{
if (attr->type != type)
return attr->type - type;
if (attr->len != len)
return attr->len - len;
return memcmp(attr->value, value, len);
}
static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
const void *value, u8 len, u8 type)
{
struct rb_node *parent = app->mad.rb_node;
struct mrp_attr *attr;
int d;
while (parent) {
attr = rb_entry(parent, struct mrp_attr, node);
d = mrp_attr_cmp(attr, value, len, type);
if (d > 0)
parent = parent->rb_left;
else if (d < 0)
parent = parent->rb_right;
else
return attr;
}
return NULL;
}
static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
const void *value, u8 len, u8 type)
{
struct rb_node *parent = NULL, **p = &app->mad.rb_node;
struct mrp_attr *attr;
int d;
while (*p) {
parent = *p;
attr = rb_entry(parent, struct mrp_attr, node);
d = mrp_attr_cmp(attr, value, len, type);
if (d > 0)
p = &parent->rb_left;
else if (d < 0)
p = &parent->rb_right;
else {
/* The attribute already exists; re-use it. */
return attr;
}
}
attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
if (!attr)
return attr;
attr->state = MRP_APPLICANT_VO;
attr->type = type;
attr->len = len;
memcpy(attr->value, value, len);
rb_link_node(&attr->node, parent, p);
rb_insert_color(&attr->node, &app->mad);
return attr;
}
static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
rb_erase(&attr->node, &app->mad);
kfree(attr);
}
static void mrp_attr_destroy_all(struct mrp_applicant *app)
{
struct rb_node *node, *next;
struct mrp_attr *attr;
for (node = rb_first(&app->mad);
next = node ? rb_next(node) : NULL, node != NULL;
node = next) {
attr = rb_entry(node, struct mrp_attr, node);
mrp_attr_destroy(app, attr);
}
}
static int mrp_pdu_init(struct mrp_applicant *app)
{
struct sk_buff *skb;
struct mrp_pdu_hdr *ph;
skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
GFP_ATOMIC);
if (!skb)
return -ENOMEM;
skb->dev = app->dev;
skb->protocol = app->app->pkttype.type;
skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
ph = __skb_put(skb, sizeof(*ph));
ph->version = app->app->version;
app->pdu = skb;
return 0;
}
static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
__be16 *endmark;
if (skb_tailroom(app->pdu) < sizeof(*endmark))
return -1;
endmark = __skb_put(app->pdu, sizeof(*endmark));
put_unaligned(MRP_END_MARK, endmark);
return 0;
}
static void mrp_pdu_queue(struct mrp_applicant *app)
{
if (!app->pdu)
return;
if (mrp_cb(app->pdu)->mh)
mrp_pdu_append_end_mark(app);
mrp_pdu_append_end_mark(app);
dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
app->app->group_address, app->dev->dev_addr,
app->pdu->len);
skb_queue_tail(&app->queue, app->pdu);
app->pdu = NULL;
}
static void mrp_queue_xmit(struct mrp_applicant *app)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&app->queue)))
dev_queue_xmit(skb);
}
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
u8 attrtype, u8 attrlen)
{
struct mrp_msg_hdr *mh;
if (mrp_cb(app->pdu)->mh) {
if (mrp_pdu_append_end_mark(app) < 0)
return -1;
mrp_cb(app->pdu)->mh = NULL;
mrp_cb(app->pdu)->vah = NULL;
}
if (skb_tailroom(app->pdu) < sizeof(*mh))
return -1;
mh = __skb_put(app->pdu, sizeof(*mh));
mh->attrtype = attrtype;
mh->attrlen = attrlen;
mrp_cb(app->pdu)->mh = mh;
return 0;
}
static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
const void *firstattrvalue, u8 attrlen)
{
struct mrp_vecattr_hdr *vah;
if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
return -1;
vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
put_unaligned(0, &vah->lenflags);
memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
mrp_cb(app->pdu)->vah = vah;
memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
return 0;
}
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
const struct mrp_attr *attr,
enum mrp_vecattr_event vaevent)
{
u16 len, pos;
u8 *vaevents;
int err;
again:
if (!app->pdu) {
err = mrp_pdu_init(app);
if (err < 0)
return err;
}
/* If there is no Message header in the PDU, or the Message header is
* for a different attribute type, add an EndMark (if necessary) and a
* new Message header to the PDU.
*/
if (!mrp_cb(app->pdu)->mh ||
mrp_cb(app->pdu)->mh->attrtype != attr->type ||
mrp_cb(app->pdu)->mh->attrlen != attr->len) {
if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
goto queue;
}
/* If there is no VectorAttribute header for this Message in the PDU,
* or this attribute's value does not sequentially follow the previous
* attribute's value, add a new VectorAttribute header to the PDU.
*/
if (!mrp_cb(app->pdu)->vah ||
memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
goto queue;
}
len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
pos = len % 3;
/* Events are packed into Vectors in the PDU, three to a byte. Add a
* byte to the end of the Vector if necessary.
*/
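	/* Worked example (editorial note, assuming __MRP_VECATTR_EVENT_MAX is
	 * 6 as in the current mrp.h): three events e1, e2 and e3 are packed
	 * into one Vector byte as e1 * 36 + e2 * 6 + e3, so events 1, 1 and 4
	 * encode to 1 * 36 + 1 * 6 + 4 = 46.
	 */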
if (!pos) {
if (skb_tailroom(app->pdu) < sizeof(u8))
goto queue;
vaevents = __skb_put(app->pdu, sizeof(u8));
} else {
vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
}
switch (pos) {
case 0:
*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
break;
case 1:
*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
break;
case 2:
*vaevents += vaevent;
break;
default:
WARN_ON(1);
}
/* Increment the length of the VectorAttribute in the PDU, as well as
* the value of the next attribute that would continue its Vector.
*/
put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);
return 0;
queue:
mrp_pdu_queue(app);
goto again;
}
static void mrp_attr_event(struct mrp_applicant *app,
struct mrp_attr *attr, enum mrp_event event)
{
enum mrp_applicant_state state;
state = mrp_applicant_state_table[attr->state][event];
if (state == MRP_APPLICANT_INVALID) {
WARN_ON(1);
return;
}
if (event == MRP_EVENT_TX) {
/* When appending the attribute fails, don't update its state
* in order to retry at the next TX event.
*/
switch (mrp_tx_action_table[attr->state]) {
case MRP_TX_ACTION_NONE:
case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
case MRP_TX_ACTION_S_IN_OPTIONAL:
break;
case MRP_TX_ACTION_S_NEW:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_NEW) < 0)
return;
break;
case MRP_TX_ACTION_S_JOIN_IN:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
return;
break;
case MRP_TX_ACTION_S_LV:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_LV) < 0)
return;
/* As a pure applicant, sending a leave message
* implies that the attribute was unregistered and
* can be destroyed.
*/
mrp_attr_destroy(app, attr);
return;
default:
WARN_ON(1);
}
}
attr->state = state;
}
int mrp_request_join(const struct net_device *dev,
const struct mrp_application *appl,
const void *value, u8 len, u8 type)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
struct mrp_attr *attr;
if (sizeof(struct mrp_skb_cb) + len >
sizeof_field(struct sk_buff, cb))
return -ENOMEM;
spin_lock_bh(&app->lock);
attr = mrp_attr_create(app, value, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return -ENOMEM;
}
mrp_attr_event(app, attr, MRP_EVENT_JOIN);
spin_unlock_bh(&app->lock);
return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);
void mrp_request_leave(const struct net_device *dev,
const struct mrp_application *appl,
const void *value, u8 len, u8 type)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
struct mrp_attr *attr;
if (sizeof(struct mrp_skb_cb) + len >
sizeof_field(struct sk_buff, cb))
return;
spin_lock_bh(&app->lock);
attr = mrp_attr_lookup(app, value, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return;
}
mrp_attr_event(app, attr, MRP_EVENT_LV);
spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);
static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
struct rb_node *node, *next;
struct mrp_attr *attr;
for (node = rb_first(&app->mad);
next = node ? rb_next(node) : NULL, node != NULL;
node = next) {
attr = rb_entry(node, struct mrp_attr, node);
mrp_attr_event(app, attr, event);
}
}
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
unsigned long delay;
delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
static void mrp_join_timer(struct timer_list *t)
{
struct mrp_applicant *app = from_timer(app, t, join_timer);
spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_pdu_queue(app);
spin_unlock(&app->lock);
mrp_queue_xmit(app);
spin_lock(&app->lock);
if (likely(app->active))
mrp_join_timer_arm(app);
spin_unlock(&app->lock);
}
static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
mod_timer(&app->periodic_timer,
jiffies + msecs_to_jiffies(mrp_periodic_time));
}
static void mrp_periodic_timer(struct timer_list *t)
{
struct mrp_applicant *app = from_timer(app, t, periodic_timer);
spin_lock(&app->lock);
if (likely(app->active)) {
mrp_mad_event(app, MRP_EVENT_PERIODIC);
mrp_pdu_queue(app);
mrp_periodic_timer_arm(app);
}
spin_unlock(&app->lock);
}
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
__be16 endmark;
if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
return -1;
if (endmark == MRP_END_MARK) {
*offset += sizeof(endmark);
return -1;
}
return 0;
}
static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
struct sk_buff *skb,
enum mrp_vecattr_event vaevent)
{
struct mrp_attr *attr;
enum mrp_event event;
attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen,
mrp_cb(skb)->mh->attrtype);
if (attr == NULL)
return;
switch (vaevent) {
case MRP_VECATTR_EVENT_NEW:
event = MRP_EVENT_R_NEW;
break;
case MRP_VECATTR_EVENT_JOIN_IN:
event = MRP_EVENT_R_JOIN_IN;
break;
case MRP_VECATTR_EVENT_IN:
event = MRP_EVENT_R_IN;
break;
case MRP_VECATTR_EVENT_JOIN_MT:
event = MRP_EVENT_R_JOIN_MT;
break;
case MRP_VECATTR_EVENT_MT:
event = MRP_EVENT_R_MT;
break;
case MRP_VECATTR_EVENT_LV:
event = MRP_EVENT_R_LV;
break;
default:
return;
}
mrp_attr_event(app, attr, event);
}
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
struct sk_buff *skb, int *offset)
{
struct mrp_vecattr_hdr _vah;
u16 valen;
u8 vaevents, vaevent;
mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
&_vah);
if (!mrp_cb(skb)->vah)
return -1;
*offset += sizeof(_vah);
if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
MRP_VECATTR_HDR_FLAG_LA)
mrp_mad_event(app, MRP_EVENT_R_LA);
valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
MRP_VECATTR_HDR_LEN_MASK);
/* The VectorAttribute structure in a PDU carries event information
* about one or more attributes having consecutive values. Only the
* value for the first attribute is contained in the structure. So
* we make a copy of that value, and then increment it each time we
* advance to the next event in its Vector.
*/
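	/* Illustrative example (editorial note): for an MVRP-style two-byte
	 * VLAN ID attribute with FirstValue 100, a Vector carrying three
	 * events applies them to VIDs 100, 101 and 102 in that order.
	 */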
if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
sizeof_field(struct sk_buff, cb))
return -1;
if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen) < 0)
return -1;
*offset += mrp_cb(skb)->mh->attrlen;
/* In a VectorAttribute, the Vector contains events which are packed
* three to a byte. We process one byte of the Vector at a time.
*/
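	/* Worked example (editorial note, mirroring the encoding side): a
	 * Vector byte of 46 decodes as 46 / 36 = 1 for the first event,
	 * (46 % 36) / 6 = 1 for the second and 46 % 6 = 4 for the third.
	 */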
while (valen > 0) {
if (skb_copy_bits(skb, *offset, &vaevents,
sizeof(vaevents)) < 0)
return -1;
*offset += sizeof(vaevents);
/* Extract and process the first event. */
vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
/* The byte is malformed; stop processing. */
return -1;
}
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
/* If present, extract and process the second event. */
if (!--valen)
break;
mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen);
vaevents %= (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
/* If present, extract and process the third event. */
if (!--valen)
break;
mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen);
vaevents %= __MRP_VECATTR_EVENT_MAX;
vaevent = vaevents;
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
}
return 0;
}
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
int *offset)
{
struct mrp_msg_hdr _mh;
mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
if (!mrp_cb(skb)->mh)
return -1;
*offset += sizeof(_mh);
if (mrp_cb(skb)->mh->attrtype == 0 ||
mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
mrp_cb(skb)->mh->attrlen == 0)
return -1;
while (skb->len > *offset) {
if (mrp_pdu_parse_end_mark(skb, offset) < 0)
break;
if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
return -1;
}
return 0;
}
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct mrp_application *appl = container_of(pt, struct mrp_application,
pkttype);
struct mrp_port *port;
struct mrp_applicant *app;
struct mrp_pdu_hdr _ph;
const struct mrp_pdu_hdr *ph;
int offset = skb_network_offset(skb);
/* If the interface is in promiscuous mode, drop the packet if
* it was unicast to another host.
*/
if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
goto out;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
goto out;
port = rcu_dereference(dev->mrp_port);
if (unlikely(!port))
goto out;
app = rcu_dereference(port->applicants[appl->type]);
if (unlikely(!app))
goto out;
ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
if (!ph)
goto out;
offset += sizeof(_ph);
if (ph->version != app->app->version)
goto out;
spin_lock(&app->lock);
while (skb->len > offset) {
if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
break;
if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
break;
}
spin_unlock(&app->lock);
out:
kfree_skb(skb);
return 0;
}
static int mrp_init_port(struct net_device *dev)
{
struct mrp_port *port;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
rcu_assign_pointer(dev->mrp_port, port);
return 0;
}
static void mrp_release_port(struct net_device *dev)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
unsigned int i;
for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
if (rtnl_dereference(port->applicants[i]))
return;
}
RCU_INIT_POINTER(dev->mrp_port, NULL);
kfree_rcu(port, rcu);
}
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
struct mrp_applicant *app;
int err;
ASSERT_RTNL();
if (!rtnl_dereference(dev->mrp_port)) {
err = mrp_init_port(dev);
if (err < 0)
goto err1;
}
err = -ENOMEM;
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
goto err2;
err = dev_mc_add(dev, appl->group_address);
if (err < 0)
goto err3;
app->dev = dev;
app->app = appl;
app->mad = RB_ROOT;
app->active = true;
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
timer_setup(&app->join_timer, mrp_join_timer, 0);
mrp_join_timer_arm(app);
timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
mrp_periodic_timer_arm(app);
return 0;
err3:
kfree(app);
err2:
mrp_release_port(dev);
err1:
return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
ASSERT_RTNL();
RCU_INIT_POINTER(port->applicants[appl->type], NULL);
spin_lock_bh(&app->lock);
app->active = false;
spin_unlock_bh(&app->lock);
/* Delete timer and generate a final TX event to flush out
* all pending messages before the applicant is gone.
*/
timer_shutdown_sync(&app->join_timer);
timer_shutdown_sync(&app->periodic_timer);
spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_attr_destroy_all(app);
mrp_pdu_queue(app);
spin_unlock_bh(&app->lock);
mrp_queue_xmit(app);
dev_mc_del(dev, appl->group_address);
kfree_rcu(app, rcu);
mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
int mrp_register_application(struct mrp_application *appl)
{
appl->pkttype.func = mrp_rcv;
dev_add_pack(&appl->pkttype);
return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);
void mrp_unregister_application(struct mrp_application *appl)
{
dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);
| linux-master | net/802/mrp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* STP SAP demux
*
* Copyright (c) 2008 Patrick McHardy <[email protected]>
*/
#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>
/* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */
#define GARP_ADDR_MIN 0x20
#define GARP_ADDR_MAX 0x2F
#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN)
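/* Editorial note: the last byte of the destination MAC selects the slot in
 * garp_protos[] below, e.g. 01:80:c2:00:00:21 maps to index
 * 0x21 - GARP_ADDR_MIN = 1.
 */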
static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
static const struct stp_proto __rcu *stp_proto __read_mostly;
static struct llc_sap *sap __read_mostly;
static unsigned int sap_registered;
static DEFINE_MUTEX(stp_proto_mutex);
/* Called under rcu_read_lock from LLC */
static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
const struct ethhdr *eh = eth_hdr(skb);
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
const struct stp_proto *proto;
if (pdu->ssap != LLC_SAP_BSPAN ||
pdu->dsap != LLC_SAP_BSPAN ||
pdu->ctrl_1 != LLC_PDU_TYPE_U)
goto err;
if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) {
proto = rcu_dereference(garp_protos[eh->h_dest[5] -
GARP_ADDR_MIN]);
if (proto &&
!ether_addr_equal(eh->h_dest, proto->group_address))
goto err;
} else
proto = rcu_dereference(stp_proto);
if (!proto)
goto err;
proto->rcv(proto, skb, dev);
return 0;
err:
kfree_skb(skb);
return 0;
}
int stp_proto_register(const struct stp_proto *proto)
{
int err = 0;
mutex_lock(&stp_proto_mutex);
if (sap_registered++ == 0) {
sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv);
if (!sap) {
err = -ENOMEM;
goto out;
}
}
if (is_zero_ether_addr(proto->group_address))
rcu_assign_pointer(stp_proto, proto);
else
rcu_assign_pointer(garp_protos[proto->group_address[5] -
GARP_ADDR_MIN], proto);
out:
mutex_unlock(&stp_proto_mutex);
return err;
}
EXPORT_SYMBOL_GPL(stp_proto_register);
void stp_proto_unregister(const struct stp_proto *proto)
{
mutex_lock(&stp_proto_mutex);
if (is_zero_ether_addr(proto->group_address))
RCU_INIT_POINTER(stp_proto, NULL);
else
RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
GARP_ADDR_MIN], NULL);
synchronize_rcu();
if (--sap_registered == 0)
llc_sap_put(sap);
mutex_unlock(&stp_proto_mutex);
}
EXPORT_SYMBOL_GPL(stp_proto_unregister);
MODULE_LICENSE("GPL");
| linux-master | net/802/stp.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 NXP
*/
#include <linux/dsa/ocelot.h>
#include "tag.h"
#define OCELOT_NAME "ocelot"
#define SEVILLE_NAME "seville"
/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
* payload and move it into the DSA tag, which will make the switch classify
* the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
* which is the pvid of standalone and VLAN-unaware bridge ports.
*/
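/* Illustrative example (editorial note): on a port under a VLAN-aware bridge
 * using 802.1Q, a frame tagged with TCI 100 has its VLAN header stripped and
 * leaves with vlan_tci == 100 in the injection header; on a standalone port
 * the classified VLAN stays 0.
 */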
static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
u64 *vlan_tci, u64 *tag_type)
{
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct vlan_ethhdr *hdr;
u16 proto, tci;
if (!br || !br_vlan_enabled(br)) {
*vlan_tci = 0;
*tag_type = IFH_TAG_TYPE_C;
return;
}
hdr = skb_vlan_eth_hdr(skb);
br_vlan_get_proto(br, &proto);
if (ntohs(hdr->h_vlan_proto) == proto) {
vlan_remove_tag(skb, &tci);
*vlan_tci = tci;
} else {
rcu_read_lock();
br_vlan_get_pvid_rcu(br, &tci);
rcu_read_unlock();
*vlan_tci = tci;
}
*tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
}
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
__be32 ifh_prefix, void **ifh)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
struct dsa_switch *ds = dp->ds;
u64 vlan_tci, tag_type;
void *injection;
__be32 *prefix;
u32 rew_op = 0;
u64 qos_class;
ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
qos_class = netdev_get_num_tc(netdev) ?
netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
injection = skb_push(skb, OCELOT_TAG_LEN);
prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
*prefix = ifh_prefix;
memset(injection, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(injection, 1);
ocelot_ifh_set_src(injection, ds->num_ports);
ocelot_ifh_set_qos_class(injection, qos_class);
ocelot_ifh_set_vlan_tci(injection, vlan_tci);
ocelot_ifh_set_tag_type(injection, tag_type);
rew_op = ocelot_ptp_rew_op(skb);
if (rew_op)
ocelot_ifh_set_rew_op(injection, rew_op);
*ifh = injection;
}
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
void *injection;
ocelot_xmit_common(skb, netdev, cpu_to_be32(0x8880000a), &injection);
ocelot_ifh_set_dest(injection, BIT_ULL(dp->index));
return skb;
}
static struct sk_buff *seville_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
void *injection;
ocelot_xmit_common(skb, netdev, cpu_to_be32(0x88800005), &injection);
seville_ifh_set_dest(injection, BIT_ULL(dp->index));
return skb;
}
static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
u64 src_port, qos_class;
u64 vlan_tci, tag_type;
u8 *start = skb->data;
struct dsa_port *dp;
u8 *extraction;
u16 vlan_tpid;
u64 rew_val;
/* Revert skb->data by the amount consumed by the DSA master,
* so it points to the beginning of the frame.
*/
skb_push(skb, ETH_HLEN);
	/* We don't care about the short prefix; it exists only so the frame
	 * can pass the DSA master's RX filter. Discard it now by moving it
	 * into the headroom.
	 */
skb_pull(skb, OCELOT_SHORT_PREFIX_LEN);
/* And skb->data now points to the extraction frame header.
* Keep a pointer to it.
*/
extraction = skb->data;
/* Now the EFH is part of the headroom as well */
skb_pull(skb, OCELOT_TAG_LEN);
/* Reset the pointer to the real MAC header */
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
/* And move skb->data to the correct location again */
skb_pull(skb, ETH_HLEN);
/* Remove from inet csum the extraction header */
skb_postpull_rcsum(skb, start, OCELOT_TOTAL_TAG_LEN);
ocelot_xfh_get_src_port(extraction, &src_port);
ocelot_xfh_get_qos_class(extraction, &qos_class);
ocelot_xfh_get_tag_type(extraction, &tag_type);
ocelot_xfh_get_vlan_tci(extraction, &vlan_tci);
ocelot_xfh_get_rew_val(extraction, &rew_val);
skb->dev = dsa_master_find_slave(netdev, 0, src_port);
if (!skb->dev)
/* The switch will reflect back some frames sent through
* sockets opened on the bare DSA master. These will come back
* with src_port equal to the index of the CPU port, for which
* there is no slave registered. So don't print any error
* message here (ignore and drop those frames).
*/
return NULL;
dsa_default_offload_fwd_mark(skb);
skb->priority = qos_class;
OCELOT_SKB_CB(skb)->tstamp_lo = rew_val;
/* Ocelot switches copy frames unmodified to the CPU. However, it is
* possible for the user to request a VLAN modification through
* VCAP_IS1_ACT_VID_REPLACE_ENA. In this case, what will happen is that
* the VLAN ID field from the Extraction Header gets updated, but the
* 802.1Q header does not (the classified VLAN only becomes visible on
* egress through the "port tag" of front-panel ports).
* So, for traffic extracted by the CPU, we want to pick up the
* classified VLAN and manually replace the existing 802.1Q header from
* the packet with it, so that the operating system is always up to
* date with the result of tc-vlan actions.
* NOTE: In VLAN-unaware mode, we don't want to do that, we want the
* frame to remain unmodified, because the classified VLAN is always
* equal to the pvid of the ingress port and should not be used for
* processing.
*/
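	/* Illustrative example (editorial note): if a tc-vlan action rewrites
	 * the classified VLAN to 20 on a VLAN-filtering port, the code below
	 * replaces the packet's 802.1Q TCI with 20 so the stack sees the
	 * post-rewrite VLAN.
	 */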
dp = dsa_slave_to_port(skb->dev);
vlan_tpid = tag_type ? ETH_P_8021AD : ETH_P_8021Q;
if (dsa_port_is_vlan_filtering(dp) &&
eth_hdr(skb)->h_proto == htons(vlan_tpid)) {
u16 dummy_vlan_tci;
skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &dummy_vlan_tci);
skb_pull_rcsum(skb, ETH_HLEN);
__vlan_hwaccel_put_tag(skb, htons(vlan_tpid), vlan_tci);
}
return skb;
}
static const struct dsa_device_ops ocelot_netdev_ops = {
.name = OCELOT_NAME,
.proto = DSA_TAG_PROTO_OCELOT,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
.needed_headroom = OCELOT_TOTAL_TAG_LEN,
.promisc_on_master = true,
};
DSA_TAG_DRIVER(ocelot_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OCELOT, OCELOT_NAME);
static const struct dsa_device_ops seville_netdev_ops = {
.name = SEVILLE_NAME,
.proto = DSA_TAG_PROTO_SEVILLE,
.xmit = seville_xmit,
.rcv = ocelot_rcv,
.needed_headroom = OCELOT_TOTAL_TAG_LEN,
.promisc_on_master = true,
};
DSA_TAG_DRIVER(seville_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SEVILLE, SEVILLE_NAME);
static struct dsa_tag_driver *ocelot_tag_driver_array[] = {
&DSA_TAG_DRIVER_NAME(ocelot_netdev_ops),
&DSA_TAG_DRIVER_NAME(seville_netdev_ops),
};
module_dsa_tag_drivers(ocelot_tag_driver_array);
MODULE_LICENSE("GPL v2");
| linux-master | net/dsa/tag_ocelot.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2022-2023 NXP
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
void dsa_db_print(const struct dsa_db *db, char buf[DSA_DB_BUFSIZ])
{
switch (db->type) {
case DSA_DB_PORT:
sprintf(buf, "port %s", db->dp->name);
break;
case DSA_DB_LAG:
sprintf(buf, "lag %s id %d", db->lag.dev->name, db->lag.id);
break;
case DSA_DB_BRIDGE:
sprintf(buf, "bridge %s num %d", db->bridge.dev->name,
db->bridge.num);
break;
default:
sprintf(buf, "unknown");
break;
}
}
const char *dsa_port_kind(const struct dsa_port *dp)
{
switch (dp->type) {
case DSA_PORT_TYPE_USER:
return "user";
case DSA_PORT_TYPE_CPU:
return "cpu";
case DSA_PORT_TYPE_DSA:
return "dsa";
default:
return "unused";
}
}
| linux-master | net/dsa/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include <linux/etherdevice.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
#include <linux/dsa/tag_qca.h>
#include "tag.h"
#define QCA_NAME "qca"
static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
__be16 *phdr;
u16 hdr;
skb_push(skb, QCA_HDR_LEN);
dsa_alloc_etype_header(skb, QCA_HDR_LEN);
phdr = dsa_etype_header_pos_tx(skb);
/* Set the version field, and set destination port information */
hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
hdr |= QCA_HDR_XMIT_FROM_CPU;
hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(dp->index));
*phdr = htons(hdr);
return skb;
}
static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev)
{
struct qca_tagger_data *tagger_data;
struct dsa_port *dp = dev->dsa_ptr;
struct dsa_switch *ds = dp->ds;
u8 ver, pk_type;
__be16 *phdr;
int port;
u16 hdr;
BUILD_BUG_ON(sizeof(struct qca_mgmt_ethhdr) != QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
tagger_data = ds->tagger_data;
if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
return NULL;
phdr = dsa_etype_header_pos_rx(skb);
hdr = ntohs(*phdr);
/* Make sure the version is correct */
ver = FIELD_GET(QCA_HDR_RECV_VERSION, hdr);
if (unlikely(ver != QCA_HDR_VERSION))
return NULL;
/* Get pk type */
pk_type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr);
/* Ethernet mgmt read/write packet */
if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) {
if (likely(tagger_data->rw_reg_ack_handler))
tagger_data->rw_reg_ack_handler(ds, skb);
return NULL;
}
/* Ethernet MIB counter packet */
if (pk_type == QCA_HDR_RECV_TYPE_MIB) {
if (likely(tagger_data->mib_autocast_handler))
tagger_data->mib_autocast_handler(ds, skb);
return NULL;
}
/* Get source port information */
port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr);
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev)
return NULL;
/* Remove QCA tag and recalculate checksum */
skb_pull_rcsum(skb, QCA_HDR_LEN);
dsa_strip_etype_header(skb, QCA_HDR_LEN);
return skb;
}
static int qca_tag_connect(struct dsa_switch *ds)
{
struct qca_tagger_data *tagger_data;
tagger_data = kzalloc(sizeof(*tagger_data), GFP_KERNEL);
if (!tagger_data)
return -ENOMEM;
ds->tagger_data = tagger_data;
return 0;
}
static void qca_tag_disconnect(struct dsa_switch *ds)
{
kfree(ds->tagger_data);
ds->tagger_data = NULL;
}
static const struct dsa_device_ops qca_netdev_ops = {
.name = QCA_NAME,
.proto = DSA_TAG_PROTO_QCA,
.connect = qca_tag_connect,
.disconnect = qca_tag_disconnect,
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
.needed_headroom = QCA_HDR_LEN,
.promisc_on_master = true,
};
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_QCA, QCA_NAME);
module_dsa_tag_driver(qca_netdev_ops);
| linux-master | net/dsa/tag_qca.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* net/dsa/tag_ksz.c - Microchip KSZ Switch tag format handling
* Copyright (c) 2017 Microchip Technology
*/
#include <linux/dsa/ksz_common.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/ptp_classify.h>
#include <net/dsa.h>
#include "tag.h"
#define KSZ8795_NAME "ksz8795"
#define KSZ9477_NAME "ksz9477"
#define KSZ9893_NAME "ksz9893"
#define LAN937X_NAME "lan937x"
/* Typically only one byte is used for tail tag. */
#define KSZ_PTP_TAG_LEN 4
#define KSZ_EGRESS_TAG_LEN 1
#define KSZ_INGRESS_TAG_LEN 1
#define KSZ_HWTS_EN 0
struct ksz_tagger_private {
struct ksz_tagger_data data; /* Must be first */
unsigned long state;
struct kthread_worker *xmit_worker;
};
static struct ksz_tagger_private *
ksz_tagger_private(struct dsa_switch *ds)
{
return ds->tagger_data;
}
static void ksz_hwtstamp_set_state(struct dsa_switch *ds, bool on)
{
struct ksz_tagger_private *priv = ksz_tagger_private(ds);
if (on)
set_bit(KSZ_HWTS_EN, &priv->state);
else
clear_bit(KSZ_HWTS_EN, &priv->state);
}
static void ksz_disconnect(struct dsa_switch *ds)
{
struct ksz_tagger_private *priv = ds->tagger_data;
kthread_destroy_worker(priv->xmit_worker);
kfree(priv);
ds->tagger_data = NULL;
}
static int ksz_connect(struct dsa_switch *ds)
{
struct ksz_tagger_data *tagger_data;
struct kthread_worker *xmit_worker;
struct ksz_tagger_private *priv;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
ret = PTR_ERR(xmit_worker);
kfree(priv);
return ret;
}
priv->xmit_worker = xmit_worker;
/* Export functions for switch driver use */
tagger_data = &priv->data;
tagger_data->hwtstamp_set_state = ksz_hwtstamp_set_state;
ds->tagger_data = priv;
return 0;
}
static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
struct net_device *dev,
unsigned int port, unsigned int len)
{
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev)
return NULL;
if (pskb_trim_rcsum(skb, skb->len - len))
return NULL;
dsa_default_offload_fwd_mark(skb);
return skb;
}
/*
* For Ingress (Host -> KSZ8795), 1 byte is added before FCS.
* ---------------------------------------------------------------------------
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
* tag : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
*
* For Egress (KSZ8795 -> Host), 1 byte is added before FCS.
* ---------------------------------------------------------------------------
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
* tag0 : zero-based value represents port
* (eg, 0x00=port1, 0x02=port3, 0x06=port7)
*/
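/* Illustrative example (editorial note): a frame transmitted out port 3
 * (bit 2) carries tail tag 0x04; a link-local destination additionally sets
 * KSZ8795_TAIL_TAG_OVERRIDE (bit 6), giving 0x44.
 */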
#define KSZ8795_TAIL_TAG_OVERRIDE BIT(6)
#define KSZ8795_TAIL_TAG_LOOKUP BIT(7)
static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct ethhdr *hdr;
u8 *tag;
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
/* Tag encoding */
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
hdr = skb_eth_hdr(skb);
*tag = 1 << dp->index;
if (is_link_local_ether_addr(hdr->h_dest))
*tag |= KSZ8795_TAIL_TAG_OVERRIDE;
return skb;
}
static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev)
{
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN);
}
static const struct dsa_device_ops ksz8795_netdev_ops = {
.name = KSZ8795_NAME,
.proto = DSA_TAG_PROTO_KSZ8795,
.xmit = ksz8795_xmit,
.rcv = ksz8795_rcv,
.needed_tailroom = KSZ_INGRESS_TAG_LEN,
};
DSA_TAG_DRIVER(ksz8795_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
/*
* For Ingress (Host -> KSZ9477), 2/6 bytes are added before FCS.
* ---------------------------------------------------------------------------
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|tag1(1byte)|
* FCS(4bytes)
* ---------------------------------------------------------------------------
* ts : time stamp (Present only if PTP is enabled in the Hardware)
* tag0 : Prioritization (not used now)
* tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
*
* For Egress (KSZ9477 -> Host), 1/5 bytes is added before FCS.
* ---------------------------------------------------------------------------
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
* ts : time stamp (Present only if bit 7 of tag0 is set)
* tag0 : zero-based value represents port
* (eg, 0x00=port1, 0x02=port3, 0x06=port7)
*/
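/* Illustrative example (editorial note): an egress tag0 of 0x82 has bit 7 set,
 * so a 4-byte PTP timestamp precedes the tag, and its low three bits (0x02)
 * select port 3 in the zero-based scheme above.
 */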
#define KSZ9477_INGRESS_TAG_LEN 2
#define KSZ9477_PTP_TAG_LEN 4
#define KSZ9477_PTP_TAG_INDICATION 0x80
#define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7)
#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
static void ksz_rcv_timestamp(struct sk_buff *skb, u8 *tag)
{
u8 *tstamp_raw = tag - KSZ_PTP_TAG_LEN;
ktime_t tstamp;
tstamp = ksz_decode_tstamp(get_unaligned_be32(tstamp_raw));
KSZ_SKB_CB(skb)->tstamp = tstamp;
}
/* The time stamp tag *needs* to be inserted if PTP is enabled in hardware,
 * regardless of whether the frame is a PTP frame or not.
 */
static void ksz_xmit_timestamp(struct dsa_port *dp, struct sk_buff *skb)
{
struct ksz_tagger_private *priv;
struct ptp_header *ptp_hdr;
unsigned int ptp_type;
u32 tstamp_raw = 0;
s64 correction;
priv = ksz_tagger_private(dp->ds);
if (!test_bit(KSZ_HWTS_EN, &priv->state))
return;
if (!KSZ_SKB_CB(skb)->update_correction)
goto output_tag;
ptp_type = KSZ_SKB_CB(skb)->ptp_type;
ptp_hdr = ptp_parse_header(skb, ptp_type);
if (!ptp_hdr)
goto output_tag;
correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
if (correction < 0) {
struct timespec64 ts;
ts = ns_to_timespec64(-correction >> 16);
tstamp_raw = ((ts.tv_sec & 3) << 30) | ts.tv_nsec;
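		/* Editorial note: the 4-byte tag packs the magnitude of the
		 * negative correction as two bits of seconds in bits 31:30
		 * and the nanoseconds in bits 29:0, e.g. 1.5 s becomes
		 * (1 << 30) | 500000000.
		 */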
/* Set correction field to 0 and update UDP checksum */
ptp_header_update_correction(skb, ptp_type, ptp_hdr, 0);
}
output_tag:
put_unaligned_be32(tstamp_raw, skb_put(skb, KSZ_PTP_TAG_LEN));
}
/* Defer transmit if waiting for egress time stamp is required. */
static struct sk_buff *ksz_defer_xmit(struct dsa_port *dp, struct sk_buff *skb)
{
struct ksz_tagger_data *tagger_data = ksz_tagger_data(dp->ds);
struct ksz_tagger_private *priv = ksz_tagger_private(dp->ds);
void (*xmit_work_fn)(struct kthread_work *work);
struct sk_buff *clone = KSZ_SKB_CB(skb)->clone;
struct ksz_deferred_xmit_work *xmit_work;
struct kthread_worker *xmit_worker;
if (!clone)
return skb; /* no deferred xmit for this packet */
xmit_work_fn = tagger_data->xmit_work_fn;
xmit_worker = priv->xmit_worker;
if (!xmit_work_fn || !xmit_worker)
return NULL;
xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
if (!xmit_work)
return NULL;
kthread_init_work(&xmit_work->work, xmit_work_fn);
/* Increase refcount so the kfree_skb in dsa_slave_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
xmit_work->skb = skb_get(skb);
kthread_queue_work(xmit_worker, &xmit_work->work);
return NULL;
}
static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
struct net_device *dev)
{
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 prio = netdev_txq_to_tc(dev, queue_mapping);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct ethhdr *hdr;
__be16 *tag;
u16 val;
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
/* Tag encoding */
ksz_xmit_timestamp(dp, skb);
tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
hdr = skb_eth_hdr(skb);
val = BIT(dp->index);
val |= FIELD_PREP(KSZ9477_TAIL_TAG_PRIO, prio);
if (is_link_local_ether_addr(hdr->h_dest))
val |= KSZ9477_TAIL_TAG_OVERRIDE;
*tag = cpu_to_be16(val);
return ksz_defer_xmit(dp, skb);
}
static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
{
/* Tag decoding */
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
unsigned int port = tag[0] & 7;
unsigned int len = KSZ_EGRESS_TAG_LEN;
/* Extra 4-bytes PTP timestamp */
if (tag[0] & KSZ9477_PTP_TAG_INDICATION) {
ksz_rcv_timestamp(skb, tag);
len += KSZ_PTP_TAG_LEN;
}
return ksz_common_rcv(skb, dev, port, len);
}
static const struct dsa_device_ops ksz9477_netdev_ops = {
.name = KSZ9477_NAME,
.proto = DSA_TAG_PROTO_KSZ9477,
.xmit = ksz9477_xmit,
.rcv = ksz9477_rcv,
.connect = ksz_connect,
.disconnect = ksz_disconnect,
.needed_tailroom = KSZ9477_INGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9477_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9477, KSZ9477_NAME);
#define KSZ9893_TAIL_TAG_PRIO GENMASK(4, 3)
#define KSZ9893_TAIL_TAG_OVERRIDE BIT(5)
#define KSZ9893_TAIL_TAG_LOOKUP BIT(6)
static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
struct net_device *dev)
{
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 prio = netdev_txq_to_tc(dev, queue_mapping);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct ethhdr *hdr;
u8 *tag;
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
/* Tag encoding */
ksz_xmit_timestamp(dp, skb);
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
hdr = skb_eth_hdr(skb);
*tag = BIT(dp->index);
*tag |= FIELD_PREP(KSZ9893_TAIL_TAG_PRIO, prio);
if (is_link_local_ether_addr(hdr->h_dest))
*tag |= KSZ9893_TAIL_TAG_OVERRIDE;
return ksz_defer_xmit(dp, skb);
}
static const struct dsa_device_ops ksz9893_netdev_ops = {
.name = KSZ9893_NAME,
.proto = DSA_TAG_PROTO_KSZ9893,
.xmit = ksz9893_xmit,
.rcv = ksz9477_rcv,
.connect = ksz_connect,
.disconnect = ksz_disconnect,
.needed_tailroom = KSZ_INGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9893_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9893, KSZ9893_NAME);
/* For xmit, 2/6 bytes are added before FCS.
* ---------------------------------------------------------------------------
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|tag1(1byte)|
* FCS(4bytes)
* ---------------------------------------------------------------------------
* ts : time stamp (Present only if PTP is enabled in the Hardware)
* tag0 : represents tag override, lookup and valid
* tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x80=port8)
*
* For rcv, 1/5 bytes are added before FCS.
* ---------------------------------------------------------------------------
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
* ts : time stamp (Present only if bit 7 of tag0 is set)
* tag0 : zero-based value represents port
* (eg, 0x00=port1, 0x02=port3, 0x07=port8)
*/
#define LAN937X_EGRESS_TAG_LEN 2
#define LAN937X_TAIL_TAG_BLOCKING_OVERRIDE BIT(11)
#define LAN937X_TAIL_TAG_LOOKUP BIT(12)
#define LAN937X_TAIL_TAG_VALID BIT(13)
#define LAN937X_TAIL_TAG_PRIO GENMASK(10, 8)
#define LAN937X_TAIL_TAG_PORT_MASK 7
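/* Illustrative sketch, not part of the driver: how lan937x_xmit() below builds
 * the 16-bit tail tag, for an assumed user port index 2 and traffic class 5
 * (example values only):
 *
 *	BIT(2)                               = 0x0004  port bit
 *	FIELD_PREP(LAN937X_TAIL_TAG_PRIO, 5) = 0x0500  prio in bits 10:8
 *	LAN937X_TAIL_TAG_VALID               = 0x2000
 *	val                                  = 0x2504
 *
 * put_unaligned_be16() stores this as the bytes 0x25 0x04 right before the
 * FCS; a link-local destination MAC would additionally set
 * LAN937X_TAIL_TAG_BLOCKING_OVERRIDE (0x0800).
 */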
static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
struct net_device *dev)
{
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 prio = netdev_txq_to_tc(dev, queue_mapping);
struct dsa_port *dp = dsa_slave_to_port(dev);
const struct ethhdr *hdr = eth_hdr(skb);
__be16 *tag;
u16 val;
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
ksz_xmit_timestamp(dp, skb);
tag = skb_put(skb, LAN937X_EGRESS_TAG_LEN);
val = BIT(dp->index);
val |= FIELD_PREP(LAN937X_TAIL_TAG_PRIO, prio);
if (is_link_local_ether_addr(hdr->h_dest))
val |= LAN937X_TAIL_TAG_BLOCKING_OVERRIDE;
/* Tail tag valid bit - This bit should always be set by the CPU */
val |= LAN937X_TAIL_TAG_VALID;
put_unaligned_be16(val, tag);
return ksz_defer_xmit(dp, skb);
}
static const struct dsa_device_ops lan937x_netdev_ops = {
.name = LAN937X_NAME,
.proto = DSA_TAG_PROTO_LAN937X,
.xmit = lan937x_xmit,
.rcv = ksz9477_rcv,
.connect = ksz_connect,
.disconnect = ksz_disconnect,
.needed_tailroom = LAN937X_EGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(lan937x_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_LAN937X, LAN937X_NAME);
static struct dsa_tag_driver *dsa_tag_driver_array[] = {
&DSA_TAG_DRIVER_NAME(ksz8795_netdev_ops),
&DSA_TAG_DRIVER_NAME(ksz9477_netdev_ops),
&DSA_TAG_DRIVER_NAME(ksz9893_netdev_ops),
&DSA_TAG_DRIVER_NAME(lan937x_netdev_ops),
};
module_dsa_tag_drivers(dsa_tag_driver_array);
MODULE_LICENSE("GPL");
| linux-master | net/dsa/tag_ksz.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Intel / Lantiq GSWIP V2.0 PMAC tag support
*
* Copyright (C) 2017 - 2018 Hauke Mehrtens <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/dsa.h>
#include "tag.h"
#define GSWIP_NAME "gswip"
#define GSWIP_TX_HEADER_LEN 4
/* special tag in TX path header */
/* Byte 0 */
#define GSWIP_TX_SLPID_SHIFT 0 /* source port ID */
#define GSWIP_TX_SLPID_CPU 2
#define GSWIP_TX_SLPID_APP1 3
#define GSWIP_TX_SLPID_APP2 4
#define GSWIP_TX_SLPID_APP3 5
#define GSWIP_TX_SLPID_APP4 6
#define GSWIP_TX_SLPID_APP5 7
/* Byte 1 */
#define GSWIP_TX_CRCGEN_DIS BIT(7)
#define GSWIP_TX_DPID_SHIFT 0 /* destination group ID */
#define GSWIP_TX_DPID_ELAN 0
#define GSWIP_TX_DPID_EWAN 1
#define GSWIP_TX_DPID_CPU 2
#define GSWIP_TX_DPID_APP1 3
#define GSWIP_TX_DPID_APP2 4
#define GSWIP_TX_DPID_APP3 5
#define GSWIP_TX_DPID_APP4 6
#define GSWIP_TX_DPID_APP5 7
/* Byte 2 */
#define GSWIP_TX_PORT_MAP_EN BIT(7)
#define GSWIP_TX_PORT_MAP_SEL BIT(6)
#define GSWIP_TX_LRN_DIS BIT(5)
#define GSWIP_TX_CLASS_EN BIT(4)
#define GSWIP_TX_CLASS_SHIFT 0
#define GSWIP_TX_CLASS_MASK GENMASK(3, 0)
/* Byte 3 */
#define GSWIP_TX_DPID_EN BIT(0)
#define GSWIP_TX_PORT_MAP_SHIFT 1
#define GSWIP_TX_PORT_MAP_MASK GENMASK(6, 1)
#define GSWIP_RX_HEADER_LEN 8
/* special tag in RX path header */
/* Byte 7 */
#define GSWIP_RX_SPPID_SHIFT 4
#define GSWIP_RX_SPPID_MASK GENMASK(6, 4)
static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *gswip_tag;
skb_push(skb, GSWIP_TX_HEADER_LEN);
gswip_tag = skb->data;
gswip_tag[0] = GSWIP_TX_SLPID_CPU;
gswip_tag[1] = GSWIP_TX_DPID_ELAN;
gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL;
gswip_tag[3] = BIT(dp->index + GSWIP_TX_PORT_MAP_SHIFT) & GSWIP_TX_PORT_MAP_MASK;
gswip_tag[3] |= GSWIP_TX_DPID_EN;
return skb;
}
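/* Illustrative sketch, not part of the driver: the 4-byte TX header that
 * gswip_tag_xmit() prepends, for an assumed user port index 2 (example value
 * only):
 *
 *	gswip_tag[0] = GSWIP_TX_SLPID_CPU                           = 0x02
 *	gswip_tag[1] = GSWIP_TX_DPID_ELAN                           = 0x00
 *	gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL = 0xc0
 *	gswip_tag[3] = (BIT(2 + GSWIP_TX_PORT_MAP_SHIFT) &
 *	                GSWIP_TX_PORT_MAP_MASK) | GSWIP_TX_DPID_EN  = 0x09
 *
 * On receive, gswip_tag_rcv() reads the source port back out of byte 7 of the
 * 8-byte RX header via GSWIP_RX_SPPID_MASK/GSWIP_RX_SPPID_SHIFT.
 */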
static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
struct net_device *dev)
{
int port;
u8 *gswip_tag;
if (unlikely(!pskb_may_pull(skb, GSWIP_RX_HEADER_LEN)))
return NULL;
gswip_tag = skb->data - ETH_HLEN;
/* Get source port information */
port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT;
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev)
return NULL;
/* remove GSWIP tag */
skb_pull_rcsum(skb, GSWIP_RX_HEADER_LEN);
return skb;
}
static const struct dsa_device_ops gswip_netdev_ops = {
.name = GSWIP_NAME,
.proto = DSA_TAG_PROTO_GSWIP,
.xmit = gswip_tag_xmit,
.rcv = gswip_tag_rcv,
.needed_headroom = GSWIP_RX_HEADER_LEN,
};
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_GSWIP, GSWIP_NAME);
module_dsa_tag_driver(gswip_netdev_ops);
| linux-master | net/dsa/tag_gswip.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handling of a master device, switching frames via its switch fabric CPU port
*
* Copyright (c) 2017 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*/
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/dsa.h>
#include "dsa.h"
#include "master.h"
#include "port.h"
#include "tag.h"
static int dsa_master_get_regs_len(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
struct dsa_switch *ds = cpu_dp->ds;
int port = cpu_dp->index;
int ret = 0;
int len;
if (ops->get_regs_len) {
len = ops->get_regs_len(dev);
if (len < 0)
return len;
ret += len;
}
ret += sizeof(struct ethtool_drvinfo);
ret += sizeof(struct ethtool_regs);
if (ds->ops->get_regs_len) {
len = ds->ops->get_regs_len(ds, port);
if (len < 0)
return len;
ret += len;
}
return ret;
}
static void dsa_master_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
struct dsa_switch *ds = cpu_dp->ds;
struct ethtool_drvinfo *cpu_info;
struct ethtool_regs *cpu_regs;
int port = cpu_dp->index;
int len;
if (ops->get_regs_len && ops->get_regs) {
len = ops->get_regs_len(dev);
if (len < 0)
return;
regs->len = len;
ops->get_regs(dev, regs, data);
data += regs->len;
}
cpu_info = (struct ethtool_drvinfo *)data;
strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver));
data += sizeof(*cpu_info);
cpu_regs = (struct ethtool_regs *)data;
data += sizeof(*cpu_regs);
if (ds->ops->get_regs_len && ds->ops->get_regs) {
len = ds->ops->get_regs_len(ds, port);
if (len < 0)
return;
cpu_regs->len = len;
ds->ops->get_regs(ds, port, cpu_regs, data);
}
}
static void dsa_master_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
uint64_t *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
struct dsa_switch *ds = cpu_dp->ds;
int port = cpu_dp->index;
int count = 0;
if (ops->get_sset_count && ops->get_ethtool_stats) {
count = ops->get_sset_count(dev, ETH_SS_STATS);
ops->get_ethtool_stats(dev, stats, data);
}
if (ds->ops->get_ethtool_stats)
ds->ops->get_ethtool_stats(ds, port, data + count);
}
static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
struct ethtool_stats *stats,
uint64_t *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
struct dsa_switch *ds = cpu_dp->ds;
int port = cpu_dp->index;
int count = 0;
if (dev->phydev && !ops->get_ethtool_phy_stats) {
count = phy_ethtool_get_sset_count(dev->phydev);
if (count >= 0)
phy_ethtool_get_stats(dev->phydev, stats, data);
} else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
ops->get_ethtool_phy_stats(dev, stats, data);
}
if (count < 0)
count = 0;
if (ds->ops->get_ethtool_phy_stats)
ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}
static int dsa_master_get_sset_count(struct net_device *dev, int sset)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
struct dsa_switch *ds = cpu_dp->ds;
int count = 0;
if (sset == ETH_SS_PHY_STATS && dev->phydev &&
!ops->get_ethtool_phy_stats)
count = phy_ethtool_get_sset_count(dev->phydev);
else if (ops->get_sset_count)
count = ops->get_sset_count(dev, sset);
if (count < 0)
count = 0;
if (ds->ops->get_sset_count)
count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);
return count;
}
static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
uint8_t *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
struct dsa_switch *ds = cpu_dp->ds;
int port = cpu_dp->index;
int len = ETH_GSTRING_LEN;
int mcount = 0, count, i;
uint8_t pfx[4];
uint8_t *ndata;
snprintf(pfx, sizeof(pfx), "p%.2d", port);
/* We do not want to be NULL-terminated, since this is a prefix */
pfx[sizeof(pfx) - 1] = '_';
if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
!ops->get_ethtool_phy_stats) {
mcount = phy_ethtool_get_sset_count(dev->phydev);
if (mcount < 0)
mcount = 0;
else
phy_ethtool_get_strings(dev->phydev, data);
} else if (ops->get_sset_count && ops->get_strings) {
mcount = ops->get_sset_count(dev, stringset);
if (mcount < 0)
mcount = 0;
ops->get_strings(dev, stringset, data);
}
if (ds->ops->get_strings) {
ndata = data + mcount * len;
/* This function copies ETH_GSTRING_LEN bytes per string; we mangle
* the output afterwards to prepend the CPU port prefix constructed
* earlier.
*/
ds->ops->get_strings(ds, port, stringset, ndata);
count = ds->ops->get_sset_count(ds, port, stringset);
if (count < 0)
return;
for (i = 0; i < count; i++) {
memmove(ndata + (i * len + sizeof(pfx)),
ndata + i * len, len - sizeof(pfx));
memcpy(ndata + i * len, pfx, sizeof(pfx));
}
}
}
/* Deny PTP operations on master if there is at least one switch in the tree
* that is PTP capable.
*/
int __dsa_master_hwtstamp_validate(struct net_device *dev,
const struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch *ds = cpu_dp->ds;
struct dsa_switch_tree *dst;
struct dsa_port *dp;
dst = ds->dst;
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_supports_hwtstamp(dp)) {
NL_SET_ERR_MSG(extack,
"HW timestamping not allowed on DSA master when switch supports the operation");
return -EBUSY;
}
}
return 0;
}
static int dsa_master_ethtool_setup(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch *ds = cpu_dp->ds;
struct ethtool_ops *ops;
if (netif_is_lag_master(dev))
return 0;
ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
cpu_dp->orig_ethtool_ops = dev->ethtool_ops;
if (cpu_dp->orig_ethtool_ops)
memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));
ops->get_regs_len = dsa_master_get_regs_len;
ops->get_regs = dsa_master_get_regs;
ops->get_sset_count = dsa_master_get_sset_count;
ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
ops->get_strings = dsa_master_get_strings;
ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;
dev->ethtool_ops = ops;
return 0;
}
static void dsa_master_ethtool_teardown(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
if (netif_is_lag_master(dev))
return;
dev->ethtool_ops = cpu_dp->orig_ethtool_ops;
cpu_dp->orig_ethtool_ops = NULL;
}
/* Keep the master always promiscuous if the tagging protocol requires it
* (the tag garbles the MAC DA), or if the master doesn't support unicast
* filtering, in which case it would revert to promiscuous mode as soon as
* we call dev_uc_add() on it anyway.
*/
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
return;
ASSERT_RTNL();
dev_set_promiscuity(dev, inc);
}
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
struct dsa_port *cpu_dp = dev->dsa_ptr;
return sysfs_emit(buf, "%s\n",
dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}
static ssize_t tagging_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
const struct dsa_device_ops *new_tag_ops, *old_tag_ops;
const char *end = strchrnul(buf, '\n'), *name;
struct net_device *dev = to_net_dev(d);
struct dsa_port *cpu_dp = dev->dsa_ptr;
size_t len = end - buf;
int err;
/* Empty string passed */
if (!len)
return -ENOPROTOOPT;
name = kstrndup(buf, len, GFP_KERNEL);
if (!name)
return -ENOMEM;
old_tag_ops = cpu_dp->tag_ops;
new_tag_ops = dsa_tag_driver_get_by_name(name);
kfree(name);
/* Bad tagger name? */
if (IS_ERR(new_tag_ops))
return PTR_ERR(new_tag_ops);
if (new_tag_ops == old_tag_ops)
/* Drop the temporarily held duplicate reference, since
* the DSA switch tree uses this tagger.
*/
goto out;
err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops,
old_tag_ops);
if (err) {
/* On failure the old tagger is restored, so we don't need the
* driver for the new one.
*/
dsa_tag_driver_put(new_tag_ops);
return err;
}
/* On success we no longer need the module for the old tagging protocol
*/
out:
dsa_tag_driver_put(old_tag_ops);
return count;
}
static DEVICE_ATTR_RW(tagging);
static struct attribute *dsa_slave_attrs[] = {
&dev_attr_tagging.attr,
NULL
};
static const struct attribute_group dsa_group = {
.name = "dsa",
.attrs = dsa_slave_attrs,
};
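/* Illustrative usage sketch (assumptions: the DSA master is "eth0", "lan1" is
 * one of its user ports, the desired tagger is "edsa" and its module is
 * available; none of these names come from this file). The attribute group
 * above is what exposes the tagger under /sys/class/net/<master>/dsa/tagging:
 *
 *	$ cat /sys/class/net/eth0/dsa/tagging
 *	$ ip link set lan1 down    # all user ports and the master must be
 *	$ ip link set eth0 down    # down, see dsa_tree_change_tag_proto()
 *	$ echo edsa > /sys/class/net/eth0/dsa/tagging
 */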
static void dsa_master_reset_mtu(struct net_device *dev)
{
int err;
err = dev_set_mtu(dev, ETH_DATA_LEN);
if (err)
netdev_dbg(dev,
"Unable to reset MTU to exclude DSA overheads\n");
}
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
struct dsa_switch *ds = cpu_dp->ds;
struct device_link *consumer_link;
int mtu, ret;
mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
/* The DSA master must use SET_NETDEV_DEV for this to work. */
if (!netif_is_lag_master(dev)) {
consumer_link = device_link_add(ds->dev, dev->dev.parent,
DL_FLAG_AUTOREMOVE_CONSUMER);
if (!consumer_link)
netdev_err(dev,
"Failed to create a device link to DSA switch %s\n",
dev_name(ds->dev));
}
/* The switch driver may not implement ->port_change_mtu(), in which
* case dsa_slave_change_mtu() will not update the master MTU either,
* so we need to do that here.
*/
ret = dev_set_mtu(dev, mtu);
if (ret)
netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
ret, mtu);
/* If we use a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point on get
* sent to the tag format's receive function.
*/
wmb();
dev->dsa_ptr = cpu_dp;
dsa_master_set_promiscuity(dev, 1);
ret = dsa_master_ethtool_setup(dev);
if (ret)
goto out_err_reset_promisc;
ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
if (ret)
goto out_err_ethtool_teardown;
return ret;
out_err_ethtool_teardown:
dsa_master_ethtool_teardown(dev);
out_err_reset_promisc:
dsa_master_set_promiscuity(dev, -1);
return ret;
}
void dsa_master_teardown(struct net_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &dsa_group);
dsa_master_ethtool_teardown(dev);
dsa_master_reset_mtu(dev);
dsa_master_set_promiscuity(dev, -1);
dev->dsa_ptr = NULL;
/* If we used a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point get sent
* without the tag and go through the regular receive path.
*/
wmb();
}
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack)
{
bool master_setup = false;
int err;
if (!netdev_uses_dsa(lag_dev)) {
err = dsa_master_setup(lag_dev, cpu_dp);
if (err)
return err;
master_setup = true;
}
err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
if (err) {
NL_SET_ERR_MSG_WEAK_MOD(extack, "CPU port failed to join LAG");
goto out_master_teardown;
}
return 0;
out_master_teardown:
if (master_setup)
dsa_master_teardown(lag_dev);
return err;
}
/* Tear down a master if there isn't any other user port on it,
* optionally also destroying LAG information.
*/
void dsa_master_lag_teardown(struct net_device *lag_dev,
struct dsa_port *cpu_dp)
{
struct net_device *upper;
struct list_head *iter;
dsa_port_lag_leave(cpu_dp, lag_dev);
netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
if (dsa_slave_dev_check(upper))
return;
dsa_master_teardown(lag_dev);
}
| linux-master | net/dsa/master.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Stubs for DSA functionality called by the core network stack.
* These are necessary because CONFIG_NET_DSA can be a module, and built-in
* code cannot directly call symbols exported by modules.
*/
#include <net/dsa_stubs.h>
const struct dsa_stubs *dsa_stubs;
EXPORT_SYMBOL_GPL(dsa_stubs);
| linux-master | net/dsa/stubs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DSA topology and switch handling
*
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2013 Florian Fainelli <[email protected]>
* Copyright (c) 2016 Andrew Lunn <[email protected]>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>
#include "devlink.h"
#include "dsa.h"
#include "master.h"
#include "netlink.h"
#include "port.h"
#include "slave.h"
#include "switch.h"
#include "tag.h"
#define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG
static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);
static struct workqueue_struct *dsa_owq;
/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;
bool dsa_schedule_work(struct work_struct *work)
{
return queue_work(dsa_owq, work);
}
void dsa_flush_workqueue(void)
{
flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
/**
* dsa_lag_map() - Map LAG structure to a linear LAG array
* @dst: Tree in which to record the mapping.
* @lag: LAG structure that is to be mapped to the tree's array.
*
* dsa_lag_id/dsa_lag_by_id can then be used to translate between the
* two spaces. The size of the mapping space is determined by the
* driver by setting ds->num_lag_ids. It is perfectly legal to leave
* it unset if it is not needed, in which case these functions become
* no-ops.
*/
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
unsigned int id;
for (id = 1; id <= dst->lags_len; id++) {
if (!dsa_lag_by_id(dst, id)) {
dst->lags[id - 1] = lag;
lag->id = id;
return;
}
}
/* No IDs left, which is OK. Some drivers do not need it. The
* ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
* returns an error for this device when joining the LAG. The
* driver can then return -EOPNOTSUPP back to DSA, which will
* fall back to a software LAG.
*/
}
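/* Illustrative sketch of the mapping above (example numbers only): with
 * ds->num_lag_ids = 4, dsa_tree_setup_lags() sizes dst->lags to 4 entries.
 * The first LAG passed to dsa_lag_map() then gets lag->id = 1 and is stored
 * at dst->lags[0], so dsa_lag_by_id(dst, 1) returns it until dsa_lag_unmap()
 * clears the slot and resets lag->id to 0.
 */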
/**
* dsa_lag_unmap() - Remove a LAG ID mapping
* @dst: Tree in which the mapping is recorded.
* @lag: LAG structure that was mapped.
*
* As there may be multiple users of the mapping, it is only removed
* if there are no other references to it.
*/
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
unsigned int id;
dsa_lags_foreach_id(id, dst) {
if (dsa_lag_by_id(dst, id) == lag) {
dst->lags[id - 1] = NULL;
lag->id = 0;
break;
}
}
}
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
const struct net_device *lag_dev)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_lag_dev_get(dp) == lag_dev)
return dp->lag;
return NULL;
}
struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
const struct net_device *br)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_bridge_dev_get(dp) == br)
return dp->bridge;
return NULL;
}
static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
struct dsa_switch_tree *dst;
list_for_each_entry(dst, &dsa_tree_list, list) {
struct dsa_bridge *bridge;
bridge = dsa_tree_bridge_find(dst, bridge_dev);
if (bridge)
return bridge->num;
}
return 0;
}
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);
/* Switches without FDB isolation support don't get unique
* bridge numbering
*/
if (!max)
return 0;
if (!bridge_num) {
/* First port that requests FDB isolation or TX forwarding
* offload for this bridge
*/
bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
DSA_MAX_NUM_OFFLOADING_BRIDGES,
1);
if (bridge_num >= max)
return 0;
set_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
return bridge_num;
}
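/* Illustrative sketch (example numbers only): with max = 8, the first bridge
 * that requests FDB isolation or TX forwarding offload gets bridge_num 1
 * (bit 0 is never handed out, since the search above starts at 1). Once bits
 * 1..7 are all taken, find_next_zero_bit() returns a value >= max and the
 * function falls back to returning 0, i.e. no dedicated bridge number.
 */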
void dsa_bridge_num_put(const struct net_device *bridge_dev,
unsigned int bridge_num)
{
/* Since we refcount bridges, we know that when we call this function
* it is no longer in use, so we can just go ahead and remove it from
* the bit mask.
*/
clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
struct dsa_switch_tree *dst;
struct dsa_port *dp;
list_for_each_entry(dst, &dsa_tree_list, list) {
if (dst->index != tree_index)
continue;
list_for_each_entry(dp, &dst->ports, list) {
if (dp->ds->index != sw_index)
continue;
return dp->ds;
}
}
return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);
static struct dsa_switch_tree *dsa_tree_find(int index)
{
struct dsa_switch_tree *dst;
list_for_each_entry(dst, &dsa_tree_list, list)
if (dst->index == index)
return dst;
return NULL;
}
static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
struct dsa_switch_tree *dst;
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst)
return NULL;
dst->index = index;
INIT_LIST_HEAD(&dst->rtable);
INIT_LIST_HEAD(&dst->ports);
INIT_LIST_HEAD(&dst->list);
list_add_tail(&dst->list, &dsa_tree_list);
kref_init(&dst->refcount);
return dst;
}
static void dsa_tree_free(struct dsa_switch_tree *dst)
{
if (dst->tag_ops)
dsa_tag_driver_put(dst->tag_ops);
list_del(&dst->list);
kfree(dst);
}
static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
if (dst)
kref_get(&dst->refcount);
return dst;
}
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
struct dsa_switch_tree *dst;
dst = dsa_tree_find(index);
if (dst)
return dsa_tree_get(dst);
else
return dsa_tree_alloc(index);
}
static void dsa_tree_release(struct kref *ref)
{
struct dsa_switch_tree *dst;
dst = container_of(ref, struct dsa_switch_tree, refcount);
dsa_tree_free(dst);
}
static void dsa_tree_put(struct dsa_switch_tree *dst)
{
if (dst)
kref_put(&dst->refcount, dsa_tree_release);
}
static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
struct device_node *dn)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dp->dn == dn)
return dp;
return NULL;
}
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
struct dsa_port *link_dp)
{
struct dsa_switch *ds = dp->ds;
struct dsa_switch_tree *dst;
struct dsa_link *dl;
dst = ds->dst;
list_for_each_entry(dl, &dst->rtable, list)
if (dl->dp == dp && dl->link_dp == link_dp)
return dl;
dl = kzalloc(sizeof(*dl), GFP_KERNEL);
if (!dl)
return NULL;
dl->dp = dp;
dl->link_dp = link_dp;
INIT_LIST_HEAD(&dl->list);
list_add_tail(&dl->list, &dst->rtable);
return dl;
}
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct dsa_switch_tree *dst = ds->dst;
struct device_node *dn = dp->dn;
struct of_phandle_iterator it;
struct dsa_port *link_dp;
struct dsa_link *dl;
int err;
of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
link_dp = dsa_tree_find_port_by_node(dst, it.node);
if (!link_dp) {
of_node_put(it.node);
return false;
}
dl = dsa_link_touch(dp, link_dp);
if (!dl) {
of_node_put(it.node);
return false;
}
}
return true;
}
static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
bool complete = true;
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_is_dsa(dp)) {
complete = dsa_port_setup_routing_table(dp);
if (!complete)
break;
}
}
return complete;
}
static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_cpu(dp))
return dp;
return NULL;
}
struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
{
struct device_node *ethernet;
struct net_device *master;
struct dsa_port *cpu_dp;
cpu_dp = dsa_tree_find_first_cpu(dst);
ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
master = of_find_net_device_by_node(ethernet);
of_node_put(ethernet);
return master;
}
/* Assign the default CPU port (the first one in the tree) to all ports of the
* fabric which don't already have one as part of their own switch.
*/
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp, *dp;
cpu_dp = dsa_tree_find_first_cpu(dst);
if (!cpu_dp) {
pr_err("DSA: tree %d has no CPU port\n", dst->index);
return -EINVAL;
}
list_for_each_entry(dp, &dst->ports, list) {
if (dp->cpu_dp)
continue;
if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
dp->cpu_dp = cpu_dp;
}
return 0;
}
static struct dsa_port *
dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
struct dsa_port *cpu_dp;
if (!ds->ops->preferred_default_local_cpu_port)
return NULL;
cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
if (!cpu_dp)
return NULL;
if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
return NULL;
return cpu_dp;
}
/* Perform initial assignment of CPU ports to user ports and DSA links in the
* fabric, giving preference to CPU ports local to each switch. Default to
* using the first CPU port in the switch tree if the port does not have a CPU
* port local to this switch.
*/
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;
list_for_each_entry(cpu_dp, &dst->ports, list) {
if (!dsa_port_is_cpu(cpu_dp))
continue;
preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
continue;
/* Prefer a local CPU port */
dsa_switch_for_each_port(dp, cpu_dp->ds) {
/* Prefer the first local CPU port found */
if (dp->cpu_dp)
continue;
if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
dp->cpu_dp = cpu_dp;
}
}
return dsa_tree_setup_default_cpu(dst);
}
static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
dp->cpu_dp = NULL;
}
static int dsa_port_setup(struct dsa_port *dp)
{
bool dsa_port_link_registered = false;
struct dsa_switch *ds = dp->ds;
bool dsa_port_enabled = false;
int err = 0;
if (dp->setup)
return 0;
err = dsa_port_devlink_setup(dp);
if (err)
return err;
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
dsa_port_disable(dp);
break;
case DSA_PORT_TYPE_CPU:
if (dp->dn) {
err = dsa_shared_port_link_register_of(dp);
if (err)
break;
dsa_port_link_registered = true;
} else {
dev_warn(ds->dev,
"skipping link registration for CPU port %d\n",
dp->index);
}
err = dsa_port_enable(dp, NULL);
if (err)
break;
dsa_port_enabled = true;
break;
case DSA_PORT_TYPE_DSA:
if (dp->dn) {
err = dsa_shared_port_link_register_of(dp);
if (err)
break;
dsa_port_link_registered = true;
} else {
dev_warn(ds->dev,
"skipping link registration for DSA port %d\n",
dp->index);
}
err = dsa_port_enable(dp, NULL);
if (err)
break;
dsa_port_enabled = true;
break;
case DSA_PORT_TYPE_USER:
of_get_mac_address(dp->dn, dp->mac);
err = dsa_slave_create(dp);
break;
}
if (err && dsa_port_enabled)
dsa_port_disable(dp);
if (err && dsa_port_link_registered)
dsa_shared_port_link_unregister_of(dp);
if (err) {
dsa_port_devlink_teardown(dp);
return err;
}
dp->setup = true;
return 0;
}
static void dsa_port_teardown(struct dsa_port *dp)
{
if (!dp->setup)
return;
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
break;
case DSA_PORT_TYPE_CPU:
dsa_port_disable(dp);
if (dp->dn)
dsa_shared_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_DSA:
dsa_port_disable(dp);
if (dp->dn)
dsa_shared_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_USER:
if (dp->slave) {
dsa_slave_destroy(dp->slave);
dp->slave = NULL;
}
break;
}
dsa_port_devlink_teardown(dp);
dp->setup = false;
}
static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
dp->type = DSA_PORT_TYPE_UNUSED;
return dsa_port_setup(dp);
}
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
struct dsa_switch_tree *dst = ds->dst;
int err;
if (tag_ops->proto == dst->default_proto)
goto connect;
rtnl_lock();
err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
rtnl_unlock();
if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err));
return err;
}
connect:
if (tag_ops->connect) {
err = tag_ops->connect(ds);
if (err)
return err;
}
if (ds->ops->connect_tag_protocol) {
err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
if (err) {
dev_err(ds->dev,
"Unable to connect to tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err));
goto disconnect;
}
}
return 0;
disconnect:
if (tag_ops->disconnect)
tag_ops->disconnect(ds);
return err;
}
static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
if (tag_ops->disconnect)
tag_ops->disconnect(ds);
}
static int dsa_switch_setup(struct dsa_switch *ds)
{
struct device_node *dn;
int err;
if (ds->setup)
return 0;
/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
* driver and before ops->setup() has run, since the switch drivers and
* the slave MDIO bus driver rely on these values for probing PHY
* devices or not
*/
ds->phys_mii_mask |= dsa_user_ports(ds);
err = dsa_switch_devlink_alloc(ds);
if (err)
return err;
err = dsa_switch_register_notifier(ds);
if (err)
goto devlink_free;
ds->configure_vlan_while_not_filtering = true;
err = ds->ops->setup(ds);
if (err < 0)
goto unregister_notifier;
err = dsa_switch_setup_tag_protocol(ds);
if (err)
goto teardown;
if (!ds->slave_mii_bus && ds->ops->phy_read) {
ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus) {
err = -ENOMEM;
goto teardown;
}
dsa_slave_mii_bus_init(ds);
dn = of_get_child_by_name(ds->dev->of_node, "mdio");
err = of_mdiobus_register(ds->slave_mii_bus, dn);
of_node_put(dn);
if (err < 0)
goto free_slave_mii_bus;
}
dsa_switch_devlink_register(ds);
ds->setup = true;
return 0;
free_slave_mii_bus:
if (ds->slave_mii_bus && ds->ops->phy_read)
mdiobus_free(ds->slave_mii_bus);
teardown:
if (ds->ops->teardown)
ds->ops->teardown(ds);
unregister_notifier:
dsa_switch_unregister_notifier(ds);
devlink_free:
dsa_switch_devlink_free(ds);
return err;
}
static void dsa_switch_teardown(struct dsa_switch *ds)
{
if (!ds->setup)
return;
dsa_switch_devlink_unregister(ds);
if (ds->slave_mii_bus && ds->ops->phy_read) {
mdiobus_unregister(ds->slave_mii_bus);
mdiobus_free(ds->slave_mii_bus);
ds->slave_mii_bus = NULL;
}
dsa_switch_teardown_tag_protocol(ds);
if (ds->ops->teardown)
ds->ops->teardown(ds);
dsa_switch_unregister_notifier(ds);
dsa_switch_devlink_free(ds);
ds->setup = false;
}
/* First tear down the non-shared, then the shared ports. This ensures that
* all work items scheduled by our switchdev handlers for user ports have
* completed before we destroy the refcounting kept on the shared ports.
*/
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
dsa_port_teardown(dp);
dsa_flush_workqueue();
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
dsa_port_teardown(dp);
}
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
dsa_switch_teardown(dp->ds);
}
/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
int err = 0;
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
err = dsa_port_setup(dp);
if (err)
goto teardown;
}
}
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
err = dsa_port_setup(dp);
if (err) {
err = dsa_port_setup_as_unused(dp);
if (err)
goto teardown;
}
}
}
return 0;
teardown:
dsa_tree_teardown_ports(dst);
return err;
}
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
int err = 0;
list_for_each_entry(dp, &dst->ports, list) {
err = dsa_switch_setup(dp->ds);
if (err) {
dsa_tree_teardown_switches(dst);
break;
}
}
return err;
}
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp;
int err = 0;
rtnl_lock();
dsa_tree_for_each_cpu_port(cpu_dp, dst) {
struct net_device *master = cpu_dp->master;
bool admin_up = (master->flags & IFF_UP) &&
!qdisc_tx_is_noop(master);
err = dsa_master_setup(master, cpu_dp);
if (err)
break;
/* Replay master state event */
dsa_tree_master_admin_state_change(dst, master, admin_up);
dsa_tree_master_oper_state_change(dst, master,
netif_oper_up(master));
}
rtnl_unlock();
return err;
}
static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp;
rtnl_lock();
dsa_tree_for_each_cpu_port(cpu_dp, dst) {
struct net_device *master = cpu_dp->master;
/* Synthesizing an "admin down" state is sufficient for
* the switches to get a notification if the master is
* currently up and running.
*/
dsa_tree_master_admin_state_change(dst, master, false);
dsa_master_teardown(master);
}
rtnl_unlock();
}
static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
unsigned int len = 0;
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list) {
if (dp->ds->num_lag_ids > len)
len = dp->ds->num_lag_ids;
}
if (!len)
return 0;
dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
if (!dst->lags)
return -ENOMEM;
dst->lags_len = len;
return 0;
}
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
kfree(dst->lags);
}
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
bool complete;
int err;
if (dst->setup) {
pr_err("DSA: tree %d already setup! Disjoint trees?\n",
dst->index);
return -EEXIST;
}
complete = dsa_tree_setup_routing_table(dst);
if (!complete)
return 0;
err = dsa_tree_setup_cpu_ports(dst);
if (err)
return err;
err = dsa_tree_setup_switches(dst);
if (err)
goto teardown_cpu_ports;
err = dsa_tree_setup_ports(dst);
if (err)
goto teardown_switches;
err = dsa_tree_setup_master(dst);
if (err)
goto teardown_ports;
err = dsa_tree_setup_lags(dst);
if (err)
goto teardown_master;
dst->setup = true;
pr_info("DSA: tree %d setup\n", dst->index);
return 0;
teardown_master:
dsa_tree_teardown_master(dst);
teardown_ports:
dsa_tree_teardown_ports(dst);
teardown_switches:
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
return err;
}
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
struct dsa_link *dl, *next;
if (!dst->setup)
return;
dsa_tree_teardown_lags(dst);
dsa_tree_teardown_master(dst);
dsa_tree_teardown_ports(dst);
dsa_tree_teardown_switches(dst);
dsa_tree_teardown_cpu_ports(dst);
list_for_each_entry_safe(dl, next, &dst->rtable, list) {
list_del(&dl->list);
kfree(dl);
}
pr_info("DSA: tree %d torn down\n", dst->index);
dst->setup = false;
}
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
const struct dsa_device_ops *tag_ops)
{
const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
struct dsa_notifier_tag_proto_info info;
int err;
dst->tag_ops = tag_ops;
/* Notify the switches from this tree about the connection
* to the new tagger
*/
info.tag_ops = tag_ops;
err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
if (err && err != -EOPNOTSUPP)
goto out_disconnect;
/* Notify the old tagger about the disconnection from this tree */
info.tag_ops = old_tag_ops;
dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
return 0;
out_disconnect:
info.tag_ops = tag_ops;
dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
dst->tag_ops = old_tag_ops;
return err;
}
/* Since the dsa/tagging sysfs device attribute is per master, the assumption
* is that all DSA switches within a tree share the same tagger, otherwise
* they would have formed disjoint trees (different "dsa,member" values).
*/
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
const struct dsa_device_ops *tag_ops,
const struct dsa_device_ops *old_tag_ops)
{
struct dsa_notifier_tag_proto_info info;
struct dsa_port *dp;
int err = -EBUSY;
if (!rtnl_trylock())
return restart_syscall();
/* At the moment we don't allow changing the tag protocol under
* traffic. The rtnl_mutex also happens to serialize concurrent
* attempts to change the tagging protocol. If we ever lift the IFF_UP
* restriction, there needs to be another mutex which serializes this.
*/
dsa_tree_for_each_user_port(dp, dst) {
if (dsa_port_to_master(dp)->flags & IFF_UP)
goto out_unlock;
if (dp->slave->flags & IFF_UP)
goto out_unlock;
}
/* Notify the tag protocol change */
info.tag_ops = tag_ops;
err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
if (err)
goto out_unwind_tagger;
err = dsa_tree_bind_tag_proto(dst, tag_ops);
if (err)
goto out_unwind_tagger;
rtnl_unlock();
return 0;
out_unwind_tagger:
info.tag_ops = old_tag_ops;
dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
rtnl_unlock();
return err;
}
static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
struct net_device *master)
{
struct dsa_notifier_master_state_info info;
struct dsa_port *cpu_dp = master->dsa_ptr;
info.master = master;
info.operational = dsa_port_master_is_operational(cpu_dp);
dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}
void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
struct net_device *master,
bool up)
{
struct dsa_port *cpu_dp = master->dsa_ptr;
bool notify = false;
/* Don't keep track of admin state on LAG DSA masters,
* but rather just of physical DSA masters
*/
if (netif_is_lag_master(master))
return;
if ((dsa_port_master_is_operational(cpu_dp)) !=
(up && cpu_dp->master_oper_up))
notify = true;
cpu_dp->master_admin_up = up;
if (notify)
dsa_tree_master_state_change(dst, master);
}
void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
struct net_device *master,
bool up)
{
struct dsa_port *cpu_dp = master->dsa_ptr;
bool notify = false;
/* Don't keep track of oper state on LAG DSA masters,
* but rather just of physical DSA masters
*/
if (netif_is_lag_master(master))
return;
if ((dsa_port_master_is_operational(cpu_dp)) !=
(cpu_dp->master_admin_up && up))
notify = true;
cpu_dp->master_oper_up = up;
if (notify)
dsa_tree_master_state_change(dst, master);
}
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp;
dsa_switch_for_each_port(dp, ds)
if (dp->index == index)
return dp;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp)
return NULL;
dp->ds = ds;
dp->index = index;
mutex_init(&dp->addr_lists_lock);
mutex_init(&dp->vlans_lock);
INIT_LIST_HEAD(&dp->fdbs);
INIT_LIST_HEAD(&dp->mdbs);
INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */
INIT_LIST_HEAD(&dp->list);
list_add_tail(&dp->list, &dst->ports);
return dp;
}
static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
dp->type = DSA_PORT_TYPE_USER;
dp->name = name;
return 0;
}
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
dp->type = DSA_PORT_TYPE_DSA;
return 0;
}
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
struct net_device *master)
{
enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
struct dsa_switch *mds, *ds = dp->ds;
unsigned int mdp_upstream;
struct dsa_port *mdp;
/* It is possible to stack DSA switches onto one another; when that
* happens, the switch driver may want to know whether its tagging
* protocol is going to work in such a configuration.
*/
if (dsa_slave_dev_check(master)) {
mdp = dsa_slave_to_port(master);
mds = mdp->ds;
mdp_upstream = dsa_upstream_port(mds, mdp->index);
tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
DSA_TAG_PROTO_NONE);
}
/* If the master device is not itself a DSA slave in a disjoint DSA
* tree, then return immediately.
*/
return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
const char *user_protocol)
{
const struct dsa_device_ops *tag_ops = NULL;
struct dsa_switch *ds = dp->ds;
struct dsa_switch_tree *dst = ds->dst;
enum dsa_tag_protocol default_proto;
/* Find out which protocol the switch would prefer. */
default_proto = dsa_get_tag_protocol(dp, master);
if (dst->default_proto) {
if (dst->default_proto != default_proto) {
dev_err(ds->dev,
"A DSA switch tree can have only one tagging protocol\n");
return -EINVAL;
}
} else {
dst->default_proto = default_proto;
}
/* See if the user wants to override that preference. */
if (user_protocol) {
if (!ds->ops->change_tag_protocol) {
dev_err(ds->dev, "Tag protocol cannot be modified\n");
return -EINVAL;
}
tag_ops = dsa_tag_driver_get_by_name(user_protocol);
if (IS_ERR(tag_ops)) {
dev_warn(ds->dev,
"Failed to find a tagging driver for protocol %s, using default\n",
user_protocol);
tag_ops = NULL;
}
}
if (!tag_ops)
tag_ops = dsa_tag_driver_get_by_id(default_proto);
if (IS_ERR(tag_ops)) {
if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
return -EPROBE_DEFER;
dev_warn(ds->dev, "No tagger for this switch\n");
return PTR_ERR(tag_ops);
}
if (dst->tag_ops) {
if (dst->tag_ops != tag_ops) {
dev_err(ds->dev,
"A DSA switch tree can have only one tagging protocol\n");
dsa_tag_driver_put(tag_ops);
return -EINVAL;
}
/* In the case of multiple CPU ports per switch, the tagging
* protocol is still reference-counted only per switch tree.
*/
dsa_tag_driver_put(tag_ops);
} else {
dst->tag_ops = tag_ops;
}
dp->master = master;
dp->type = DSA_PORT_TYPE_CPU;
dsa_port_set_tag_protocol(dp, dst->tag_ops);
dp->dst = dst;
/* At this point, the tree may be configured to use a different
* tagger than the one chosen by the switch driver during
* .setup, in the case when a user selects a custom protocol
* through the DT.
*
* This is resolved by syncing the driver with the tree in
* dsa_switch_setup_tag_protocol once .setup has run and the
* driver is ready to accept calls to .change_tag_protocol. If
* the driver does not support the custom protocol at that
* point, the tree is wholly rejected, thereby ensuring that the
* tree and driver are always in agreement on the protocol to
* use.
*/
return 0;
}
static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
const char *name = of_get_property(dn, "label", NULL);
bool link = of_property_read_bool(dn, "link");
dp->dn = dn;
if (ethernet) {
struct net_device *master;
const char *user_protocol;
master = of_find_net_device_by_node(ethernet);
of_node_put(ethernet);
if (!master)
return -EPROBE_DEFER;
user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
return dsa_port_parse_cpu(dp, master, user_protocol);
}
if (link)
return dsa_port_parse_dsa(dp);
return dsa_port_parse_user(dp, name);
}
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
struct device_node *dn)
{
struct device_node *ports, *port;
struct dsa_port *dp;
int err = 0;
u32 reg;
ports = of_get_child_by_name(dn, "ports");
if (!ports) {
/* The second possibility is "ethernet-ports" */
ports = of_get_child_by_name(dn, "ethernet-ports");
if (!ports) {
dev_err(ds->dev, "no ports child node found\n");
return -EINVAL;
}
}
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
if (err) {
of_node_put(port);
goto out_put_node;
}
if (reg >= ds->num_ports) {
dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
port, reg, ds->num_ports);
of_node_put(port);
err = -EINVAL;
goto out_put_node;
}
dp = dsa_to_port(ds, reg);
err = dsa_port_parse_of(dp, port);
if (err) {
of_node_put(port);
goto out_put_node;
}
}
out_put_node:
of_node_put(ports);
return err;
}
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
struct device_node *dn)
{
u32 m[2] = { 0, 0 };
int sz;
/* Don't error out if this optional property isn't found */
sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
if (sz < 0 && sz != -EINVAL)
return sz;
ds->index = m[1];
ds->dst = dsa_tree_touch(m[0]);
if (!ds->dst)
return -ENOMEM;
if (dsa_switch_find(ds->dst->index, ds->index)) {
dev_err(ds->dev,
"A DSA switch with index %d already exists in tree %d\n",
ds->index, ds->dst->index);
return -EEXIST;
}
if (ds->dst->last_switch < ds->index)
ds->dst->last_switch = ds->index;
return 0;
}
static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
struct dsa_port *dp;
int port;
for (port = 0; port < ds->num_ports; port++) {
dp = dsa_port_touch(ds, port);
if (!dp)
return -ENOMEM;
}
return 0;
}
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
int err;
err = dsa_switch_parse_member_of(ds, dn);
if (err)
return err;
err = dsa_switch_touch_ports(ds);
if (err)
return err;
return dsa_switch_parse_ports_of(ds, dn);
}
static int dev_is_class(struct device *dev, void *class)
{
if (dev->class != NULL && !strcmp(dev->class->name, class))
return 1;
return 0;
}
static struct device *dev_find_class(struct device *parent, char *class)
{
if (dev_is_class(parent, class)) {
get_device(parent);
return parent;
}
return device_find_child(parent, class, dev_is_class);
}
static struct net_device *dsa_dev_to_net_device(struct device *dev)
{
struct device *d;
d = dev_find_class(dev, "net");
if (d != NULL) {
struct net_device *nd;
nd = to_net_dev(d);
dev_hold(nd);
put_device(d);
return nd;
}
return NULL;
}
static int dsa_port_parse(struct dsa_port *dp, const char *name,
struct device *dev)
{
if (!strcmp(name, "cpu")) {
struct net_device *master;
master = dsa_dev_to_net_device(dev);
if (!master)
return -EPROBE_DEFER;
dev_put(master);
return dsa_port_parse_cpu(dp, master, NULL);
}
if (!strcmp(name, "dsa"))
return dsa_port_parse_dsa(dp);
return dsa_port_parse_user(dp, name);
}
static int dsa_switch_parse_ports(struct dsa_switch *ds,
struct dsa_chip_data *cd)
{
bool valid_name_found = false;
struct dsa_port *dp;
struct device *dev;
const char *name;
unsigned int i;
int err;
for (i = 0; i < DSA_MAX_PORTS; i++) {
name = cd->port_names[i];
dev = cd->netdev[i];
dp = dsa_to_port(ds, i);
if (!name)
continue;
err = dsa_port_parse(dp, name, dev);
if (err)
return err;
valid_name_found = true;
}
if (!valid_name_found && i == DSA_MAX_PORTS)
return -EINVAL;
return 0;
}
static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
int err;
ds->cd = cd;
/* We don't support interconnected switches nor multiple trees via
* platform data, so this is the unique switch of the tree.
*/
ds->index = 0;
ds->dst = dsa_tree_touch(0);
if (!ds->dst)
return -ENOMEM;
err = dsa_switch_touch_ports(ds);
if (err)
return err;
return dsa_switch_parse_ports(ds, cd);
}
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
struct dsa_port *dp, *next;
dsa_switch_for_each_port_safe(dp, next, ds) {
WARN_ON(!list_empty(&dp->fdbs));
WARN_ON(!list_empty(&dp->mdbs));
WARN_ON(!list_empty(&dp->vlans));
list_del(&dp->list);
kfree(dp);
}
}
static int dsa_switch_probe(struct dsa_switch *ds)
{
struct dsa_switch_tree *dst;
struct dsa_chip_data *pdata;
struct device_node *np;
int err;
if (!ds->dev)
return -ENODEV;
pdata = ds->dev->platform_data;
np = ds->dev->of_node;
if (!ds->num_ports)
return -EINVAL;
if (np) {
err = dsa_switch_parse_of(ds, np);
if (err)
dsa_switch_release_ports(ds);
} else if (pdata) {
err = dsa_switch_parse(ds, pdata);
if (err)
dsa_switch_release_ports(ds);
} else {
err = -ENODEV;
}
if (err)
return err;
dst = ds->dst;
dsa_tree_get(dst);
err = dsa_tree_setup(dst);
if (err) {
dsa_switch_release_ports(ds);
dsa_tree_put(dst);
}
return err;
}
int dsa_register_switch(struct dsa_switch *ds)
{
int err;
mutex_lock(&dsa2_mutex);
err = dsa_switch_probe(ds);
dsa_tree_put(ds->dst);
mutex_unlock(&dsa2_mutex);
return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
static void dsa_switch_remove(struct dsa_switch *ds)
{
struct dsa_switch_tree *dst = ds->dst;
dsa_tree_teardown(dst);
dsa_switch_release_ports(ds);
dsa_tree_put(dst);
}
void dsa_unregister_switch(struct dsa_switch *ds)
{
mutex_lock(&dsa2_mutex);
dsa_switch_remove(ds);
mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
* blocking that operation from completion, due to the dev_hold taken inside
* netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
* the DSA master, so that the system can reboot successfully.
*/
void dsa_switch_shutdown(struct dsa_switch *ds)
{
struct net_device *master, *slave_dev;
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
if (!ds->setup)
goto out;
rtnl_lock();
dsa_switch_for_each_user_port(dp, ds) {
master = dsa_port_to_master(dp);
slave_dev = dp->slave;
netdev_upper_dev_unlink(master, slave_dev);
}
/* Disconnect from further netdevice notifiers on the master,
* since netdev_uses_dsa() will now return false.
*/
dsa_switch_for_each_cpu_port(dp, ds)
dp->master->dsa_ptr = NULL;
rtnl_unlock();
out:
mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}
int dsa_switch_suspend(struct dsa_switch *ds)
{
struct dsa_port *dp;
int ret = 0;
/* Suspend slave network devices */
dsa_switch_for_each_port(dp, ds) {
if (!dsa_port_is_initialized(dp))
continue;
ret = dsa_slave_suspend(dp->slave);
if (ret)
return ret;
}
if (ds->ops->suspend)
ret = ds->ops->suspend(ds);
return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);
int dsa_switch_resume(struct dsa_switch *ds)
{
struct dsa_port *dp;
int ret = 0;
if (ds->ops->resume)
ret = ds->ops->resume(ds);
if (ret)
return ret;
/* Resume slave network devices */
dsa_switch_for_each_port(dp, ds) {
if (!dsa_port_is_initialized(dp))
continue;
ret = dsa_slave_resume(dp->slave);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
if (!netdev || !dsa_slave_dev_check(netdev))
return ERR_PTR(-ENODEV);
return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);
bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
if (a->type != b->type)
return false;
switch (a->type) {
case DSA_DB_PORT:
return a->dp == b->dp;
case DSA_DB_LAG:
return a->lag.dev == b->lag.dev;
case DSA_DB_BRIDGE:
return a->bridge.num == b->bridge.num;
default:
WARN_ON(1);
return false;
}
}
bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_mac_addr *a;
lockdep_assert_held(&dp->addr_lists_lock);
list_for_each_entry(a, &dp->fdbs, list) {
if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
continue;
if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);
bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_mac_addr *a;
lockdep_assert_held(&dp->addr_lists_lock);
list_for_each_entry(a, &dp->mdbs, list) {
if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
continue;
if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
static const struct dsa_stubs __dsa_stubs = {
.master_hwtstamp_validate = __dsa_master_hwtstamp_validate,
};
static void dsa_register_stubs(void)
{
dsa_stubs = &__dsa_stubs;
}
static void dsa_unregister_stubs(void)
{
dsa_stubs = NULL;
}
static int __init dsa_init_module(void)
{
int rc;
dsa_owq = alloc_ordered_workqueue("dsa_ordered",
WQ_MEM_RECLAIM);
if (!dsa_owq)
return -ENOMEM;
rc = dsa_slave_register_notifier();
if (rc)
goto register_notifier_fail;
dev_add_pack(&dsa_pack_type);
rc = rtnl_link_register(&dsa_link_ops);
if (rc)
goto netlink_register_fail;
dsa_register_stubs();
return 0;
netlink_register_fail:
dsa_slave_unregister_notifier();
dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
destroy_workqueue(dsa_owq);
return rc;
}
module_init(dsa_init_module);
static void __exit dsa_cleanup_module(void)
{
dsa_unregister_stubs();
rtnl_link_unregister(&dsa_link_ops);
dsa_slave_unregister_notifier();
dev_remove_pack(&dsa_pack_type);
destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);
MODULE_AUTHOR("Lennert Buytenhek <[email protected]>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");
| linux-master | net/dsa/dsa.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handling of a single switch chip, part of a switch fabric
*
* Copyright (c) 2017 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*/
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include "dsa.h"
#include "netlink.h"
#include "port.h"
#include "slave.h"
#include "switch.h"
#include "tag_8021q.h"
#include "trace.h"
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
struct dsa_port *dp;
dsa_switch_for_each_port(dp, ds)
if (dp->ageing_time && dp->ageing_time < ageing_time)
ageing_time = dp->ageing_time;
return ageing_time;
}
static int dsa_switch_ageing_time(struct dsa_switch *ds,
struct dsa_notifier_ageing_time_info *info)
{
unsigned int ageing_time = info->ageing_time;
if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
return -ERANGE;
if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
return -ERANGE;
/* Program the fastest ageing time in case of multiple bridges */
ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
if (ds->ops->set_ageing_time)
return ds->ops->set_ageing_time(ds, ageing_time);
return 0;
}
static bool dsa_port_mtu_match(struct dsa_port *dp,
struct dsa_notifier_mtu_info *info)
{
return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}
static int dsa_switch_mtu(struct dsa_switch *ds,
struct dsa_notifier_mtu_info *info)
{
struct dsa_port *dp;
int ret;
if (!ds->ops->port_change_mtu)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_mtu_match(dp, info)) {
ret = ds->ops->port_change_mtu(ds, dp->index,
info->mtu);
if (ret)
return ret;
}
}
return 0;
}
static int dsa_switch_bridge_join(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
{
int err;
if (info->dp->ds == ds) {
if (!ds->ops->port_bridge_join)
return -EOPNOTSUPP;
err = ds->ops->port_bridge_join(ds, info->dp->index,
info->bridge,
&info->tx_fwd_offload,
info->extack);
if (err)
return err;
}
if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
err = ds->ops->crosschip_bridge_join(ds,
info->dp->ds->dst->index,
info->dp->ds->index,
info->dp->index,
info->bridge,
info->extack);
if (err)
return err;
}
return 0;
}
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
{
if (info->dp->ds == ds && ds->ops->port_bridge_leave)
ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
info->dp->ds->index,
info->dp->index,
info->bridge);
return 0;
}
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
* DSA links) that sit between the targeted port on which the notifier was
* emitted and its dedicated CPU port.
*/
static bool dsa_port_host_address_match(struct dsa_port *dp,
const struct dsa_port *targeted_dp)
{
struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
cpu_dp->index);
return false;
}
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct dsa_mac_addr *a;
list_for_each_entry(a, addr_list, list)
if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
dsa_db_equal(&a->db, &db))
return a;
return NULL;
}
static int dsa_port_do_mdb_add(struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
int port = dp->index;
int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
err = ds->ops->port_mdb_add(ds, port, mdb, db);
trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
return err;
}
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
if (a) {
refcount_inc(&a->refcount);
trace_dsa_mdb_add_bump(dp, mdb->addr, mdb->vid, &db,
&a->refcount);
goto out;
}
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a) {
err = -ENOMEM;
goto out;
}
err = ds->ops->port_mdb_add(ds, port, mdb, db);
trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
if (err) {
kfree(a);
goto out;
}
ether_addr_copy(a->addr, mdb->addr);
a->vid = mdb->vid;
a->db = db;
refcount_set(&a->refcount, 1);
list_add_tail(&a->list, &dp->mdbs);
out:
mutex_unlock(&dp->addr_lists_lock);
return err;
}
static int dsa_port_do_mdb_del(struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
int port = dp->index;
int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
err = ds->ops->port_mdb_del(ds, port, mdb, db);
trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
return err;
}
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
if (!a) {
trace_dsa_mdb_del_not_found(dp, mdb->addr, mdb->vid, &db);
err = -ENOENT;
goto out;
}
if (!refcount_dec_and_test(&a->refcount)) {
trace_dsa_mdb_del_drop(dp, mdb->addr, mdb->vid, &db,
&a->refcount);
goto out;
}
err = ds->ops->port_mdb_del(ds, port, mdb, db);
trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
if (err) {
refcount_set(&a->refcount, 1);
goto out;
}
list_del(&a->list);
kfree(a);
out:
mutex_unlock(&dp->addr_lists_lock);
return err;
}
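/* Illustration of the refcounting scheme above (hypothetical sequence): on a
* CPU or DSA port, two successive adds of the same {addr, vid, db} tuple call
* ds->ops->port_mdb_add() only once and leave the refcount at 2; the first
* del only drops the refcount, and the hardware entry is removed on the
* second del, when the refcount reaches zero. User ports skip all of this and
* always hit the hardware directly.
*/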
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid, struct dsa_db db)
{
struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
int port = dp->index;
int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
return err;
}
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
if (a) {
refcount_inc(&a->refcount);
trace_dsa_fdb_add_bump(dp, addr, vid, &db, &a->refcount);
goto out;
}
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a) {
err = -ENOMEM;
goto out;
}
err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
if (err) {
kfree(a);
goto out;
}
ether_addr_copy(a->addr, addr);
a->vid = vid;
a->db = db;
refcount_set(&a->refcount, 1);
list_add_tail(&a->list, &dp->fdbs);
out:
mutex_unlock(&dp->addr_lists_lock);
return err;
}
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid, struct dsa_db db)
{
struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
int port = dp->index;
int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
return err;
}
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
if (!a) {
trace_dsa_fdb_del_not_found(dp, addr, vid, &db);
err = -ENOENT;
goto out;
}
if (!refcount_dec_and_test(&a->refcount)) {
trace_dsa_fdb_del_drop(dp, addr, vid, &db, &a->refcount);
goto out;
}
err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
if (err) {
refcount_set(&a->refcount, 1);
goto out;
}
list_del(&a->list);
kfree(a);
out:
mutex_unlock(&dp->addr_lists_lock);
return err;
}
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct dsa_mac_addr *a;
int err = 0;
mutex_lock(&lag->fdb_lock);
a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
if (a) {
refcount_inc(&a->refcount);
trace_dsa_lag_fdb_add_bump(lag->dev, addr, vid, &db,
&a->refcount);
goto out;
}
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a) {
err = -ENOMEM;
goto out;
}
err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
trace_dsa_lag_fdb_add_hw(lag->dev, addr, vid, &db, err);
if (err) {
kfree(a);
goto out;
}
ether_addr_copy(a->addr, addr);
a->vid = vid;
a->db = db;
refcount_set(&a->refcount, 1);
list_add_tail(&a->list, &lag->fdbs);
out:
mutex_unlock(&lag->fdb_lock);
return err;
}
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct dsa_mac_addr *a;
int err = 0;
mutex_lock(&lag->fdb_lock);
a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
if (!a) {
trace_dsa_lag_fdb_del_not_found(lag->dev, addr, vid, &db);
err = -ENOENT;
goto out;
}
if (!refcount_dec_and_test(&a->refcount)) {
trace_dsa_lag_fdb_del_drop(lag->dev, addr, vid, &db,
&a->refcount);
goto out;
}
err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
trace_dsa_lag_fdb_del_hw(lag->dev, addr, vid, &db, err);
if (err) {
refcount_set(&a->refcount, 1);
goto out;
}
list_del(&a->list);
kfree(a);
out:
mutex_unlock(&lag->fdb_lock);
return err;
}
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
struct dsa_port *dp;
int err = 0;
if (!ds->ops->port_fdb_add)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_host_address_match(dp, info->dp)) {
if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
info->addr,
info->vid,
info->db);
} else {
err = dsa_port_do_fdb_add(dp, info->addr,
info->vid, info->db);
}
if (err)
break;
}
}
return err;
}
static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
struct dsa_port *dp;
int err = 0;
if (!ds->ops->port_fdb_del)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_host_address_match(dp, info->dp)) {
if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
info->addr,
info->vid,
info->db);
} else {
err = dsa_port_do_fdb_del(dp, info->addr,
info->vid, info->db);
}
if (err)
break;
}
}
return err;
}
static int dsa_switch_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_fdb_add)
return -EOPNOTSUPP;
return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}
static int dsa_switch_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_fdb_del)
return -EOPNOTSUPP;
return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}
static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_lag_fdb_info *info)
{
struct dsa_port *dp;
if (!ds->ops->lag_fdb_add)
return -EOPNOTSUPP;
/* Notify switch only if it has a port in this LAG */
dsa_switch_for_each_port(dp, ds)
if (dsa_port_offloads_lag(dp, info->lag))
return dsa_switch_do_lag_fdb_add(ds, info->lag,
info->addr, info->vid,
info->db);
return 0;
}
static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_lag_fdb_info *info)
{
struct dsa_port *dp;
if (!ds->ops->lag_fdb_del)
return -EOPNOTSUPP;
/* Notify switch only if it has a port in this LAG */
dsa_switch_for_each_port(dp, ds)
if (dsa_port_offloads_lag(dp, info->lag))
return dsa_switch_do_lag_fdb_del(ds, info->lag,
info->addr, info->vid,
info->db);
return 0;
}
static int dsa_switch_lag_change(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
if (info->dp->ds == ds && ds->ops->port_lag_change)
return ds->ops->port_lag_change(ds, info->dp->index);
if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
info->dp->index);
return 0;
}
static int dsa_switch_lag_join(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
if (info->dp->ds == ds && ds->ops->port_lag_join)
return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
info->info, info->extack);
if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
info->dp->index, info->lag,
info->info, info->extack);
return -EOPNOTSUPP;
}
static int dsa_switch_lag_leave(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
if (info->dp->ds == ds && ds->ops->port_lag_leave)
return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
info->dp->index, info->lag);
return -EOPNOTSUPP;
}
static int dsa_switch_mdb_add(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_mdb_add)
return -EOPNOTSUPP;
return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}
static int dsa_switch_mdb_del(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_mdb_del)
return -EOPNOTSUPP;
return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}
static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
struct dsa_port *dp;
int err = 0;
if (!ds->ops->port_mdb_add)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_host_address_match(dp, info->dp)) {
err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
if (err)
break;
}
}
return err;
}
static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
struct dsa_port *dp;
int err = 0;
if (!ds->ops->port_mdb_del)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_host_address_match(dp, info->dp)) {
err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
if (err)
break;
}
}
return err;
}
/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
struct dsa_notifier_vlan_info *info)
{
return dsa_port_is_dsa(dp) || dp == info->dp;
}
/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
* (upstream and downstream) of that switch and its upstream switches.
*/
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
const struct dsa_port *targeted_dp)
{
struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
return dsa_port_is_dsa(dp) || dp == cpu_dp;
return false;
}
struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_vlan *v;
list_for_each_entry(v, vlan_list, list)
if (v->vid == vlan->vid)
return v;
return NULL;
}
static int dsa_port_do_vlan_add(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
struct dsa_vlan *v;
int err = 0;
/* No need to bother with refcounting for user ports. */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
err = ds->ops->port_vlan_add(ds, port, vlan, extack);
trace_dsa_vlan_add_hw(dp, vlan, err);
return err;
}
/* No need to propagate on shared ports the existing VLANs that were
* re-notified after just the flags have changed. This would cause a
* refcount bump which we need to avoid, since it unbalances the
* additions with the deletions.
*/
if (vlan->changed)
return 0;
mutex_lock(&dp->vlans_lock);
v = dsa_vlan_find(&dp->vlans, vlan);
if (v) {
refcount_inc(&v->refcount);
trace_dsa_vlan_add_bump(dp, vlan, &v->refcount);
goto out;
}
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v) {
err = -ENOMEM;
goto out;
}
err = ds->ops->port_vlan_add(ds, port, vlan, extack);
trace_dsa_vlan_add_hw(dp, vlan, err);
if (err) {
kfree(v);
goto out;
}
v->vid = vlan->vid;
refcount_set(&v->refcount, 1);
list_add_tail(&v->list, &dp->vlans);
out:
mutex_unlock(&dp->vlans_lock);
return err;
}
static int dsa_port_do_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
struct dsa_vlan *v;
int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
err = ds->ops->port_vlan_del(ds, port, vlan);
trace_dsa_vlan_del_hw(dp, vlan, err);
return err;
}
mutex_lock(&dp->vlans_lock);
v = dsa_vlan_find(&dp->vlans, vlan);
if (!v) {
trace_dsa_vlan_del_not_found(dp, vlan);
err = -ENOENT;
goto out;
}
if (!refcount_dec_and_test(&v->refcount)) {
trace_dsa_vlan_del_drop(dp, vlan, &v->refcount);
goto out;
}
err = ds->ops->port_vlan_del(ds, port, vlan);
trace_dsa_vlan_del_hw(dp, vlan, err);
if (err) {
refcount_set(&v->refcount, 1);
goto out;
}
list_del(&v->list);
kfree(v);
out:
mutex_unlock(&dp->vlans_lock);
return err;
}
static int dsa_switch_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
struct dsa_port *dp;
int err;
if (!ds->ops->port_vlan_add)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_vlan_match(dp, info)) {
err = dsa_port_do_vlan_add(dp, info->vlan,
info->extack);
if (err)
return err;
}
}
return 0;
}
static int dsa_switch_vlan_del(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
struct dsa_port *dp;
int err;
if (!ds->ops->port_vlan_del)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_vlan_match(dp, info)) {
err = dsa_port_do_vlan_del(dp, info->vlan);
if (err)
return err;
}
}
return 0;
}
static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
struct dsa_port *dp;
int err;
if (!ds->ops->port_vlan_add)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_host_vlan_match(dp, info->dp)) {
err = dsa_port_do_vlan_add(dp, info->vlan,
info->extack);
if (err)
return err;
}
}
return 0;
}
static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
struct dsa_port *dp;
int err;
if (!ds->ops->port_vlan_del)
return -EOPNOTSUPP;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_host_vlan_match(dp, info->dp)) {
err = dsa_port_do_vlan_del(dp, info->vlan);
if (err)
return err;
}
}
return 0;
}
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
struct dsa_notifier_tag_proto_info *info)
{
const struct dsa_device_ops *tag_ops = info->tag_ops;
struct dsa_port *dp, *cpu_dp;
int err;
if (!ds->ops->change_tag_protocol)
return -EOPNOTSUPP;
ASSERT_RTNL();
err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
if (err)
return err;
dsa_switch_for_each_cpu_port(cpu_dp, ds)
dsa_port_set_tag_protocol(cpu_dp, tag_ops);
/* Now that changing the tag protocol can no longer fail, let's update
* the remaining bits which are "duplicated for faster access", and the
* bits that depend on the tagger, such as the MTU.
*/
dsa_switch_for_each_user_port(dp, ds) {
struct net_device *slave = dp->slave;
dsa_slave_setup_tagger(slave);
/* rtnl_mutex is held in dsa_tree_change_tag_proto */
dsa_slave_change_mtu(slave, slave->mtu);
}
return 0;
}
/* We use the same cross-chip notifiers to inform both the tagger side, as well
* as the switch side, of connection and disconnection events.
* Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
* switch side doesn't support connecting to this tagger, and therefore, the
* fact that we don't disconnect the tagger side doesn't constitute a memory
* leak: the tagger will still operate with persistent per-switch memory, just
* with the switch side unconnected to it. What does constitute a hard error is
* when the switch side supports connecting but fails.
*/
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
struct dsa_notifier_tag_proto_info *info)
{
const struct dsa_device_ops *tag_ops = info->tag_ops;
int err;
/* Notify the new tagger about the connection to this switch */
if (tag_ops->connect) {
err = tag_ops->connect(ds);
if (err)
return err;
}
if (!ds->ops->connect_tag_protocol)
return -EOPNOTSUPP;
/* Notify the switch about the connection to the new tagger */
err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
if (err) {
/* Revert the new tagger's connection to this tree */
if (tag_ops->disconnect)
tag_ops->disconnect(ds);
return err;
}
return 0;
}
static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
struct dsa_notifier_tag_proto_info *info)
{
const struct dsa_device_ops *tag_ops = info->tag_ops;
/* Notify the tagger about the disconnection from this switch */
if (tag_ops->disconnect && ds->tagger_data)
tag_ops->disconnect(ds);
/* No need to notify the switch, since it shouldn't have any
* resources to tear down
*/
return 0;
}
static int
dsa_switch_master_state_change(struct dsa_switch *ds,
struct dsa_notifier_master_state_info *info)
{
if (!ds->ops->master_state_change)
return 0;
ds->ops->master_state_change(ds, info->master, info->operational);
return 0;
}
static int dsa_switch_event(struct notifier_block *nb,
unsigned long event, void *info)
{
struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
int err;
switch (event) {
case DSA_NOTIFIER_AGEING_TIME:
err = dsa_switch_ageing_time(ds, info);
break;
case DSA_NOTIFIER_BRIDGE_JOIN:
err = dsa_switch_bridge_join(ds, info);
break;
case DSA_NOTIFIER_BRIDGE_LEAVE:
err = dsa_switch_bridge_leave(ds, info);
break;
case DSA_NOTIFIER_FDB_ADD:
err = dsa_switch_fdb_add(ds, info);
break;
case DSA_NOTIFIER_FDB_DEL:
err = dsa_switch_fdb_del(ds, info);
break;
case DSA_NOTIFIER_HOST_FDB_ADD:
err = dsa_switch_host_fdb_add(ds, info);
break;
case DSA_NOTIFIER_HOST_FDB_DEL:
err = dsa_switch_host_fdb_del(ds, info);
break;
case DSA_NOTIFIER_LAG_FDB_ADD:
err = dsa_switch_lag_fdb_add(ds, info);
break;
case DSA_NOTIFIER_LAG_FDB_DEL:
err = dsa_switch_lag_fdb_del(ds, info);
break;
case DSA_NOTIFIER_LAG_CHANGE:
err = dsa_switch_lag_change(ds, info);
break;
case DSA_NOTIFIER_LAG_JOIN:
err = dsa_switch_lag_join(ds, info);
break;
case DSA_NOTIFIER_LAG_LEAVE:
err = dsa_switch_lag_leave(ds, info);
break;
case DSA_NOTIFIER_MDB_ADD:
err = dsa_switch_mdb_add(ds, info);
break;
case DSA_NOTIFIER_MDB_DEL:
err = dsa_switch_mdb_del(ds, info);
break;
case DSA_NOTIFIER_HOST_MDB_ADD:
err = dsa_switch_host_mdb_add(ds, info);
break;
case DSA_NOTIFIER_HOST_MDB_DEL:
err = dsa_switch_host_mdb_del(ds, info);
break;
case DSA_NOTIFIER_VLAN_ADD:
err = dsa_switch_vlan_add(ds, info);
break;
case DSA_NOTIFIER_VLAN_DEL:
err = dsa_switch_vlan_del(ds, info);
break;
case DSA_NOTIFIER_HOST_VLAN_ADD:
err = dsa_switch_host_vlan_add(ds, info);
break;
case DSA_NOTIFIER_HOST_VLAN_DEL:
err = dsa_switch_host_vlan_del(ds, info);
break;
case DSA_NOTIFIER_MTU:
err = dsa_switch_mtu(ds, info);
break;
case DSA_NOTIFIER_TAG_PROTO:
err = dsa_switch_change_tag_proto(ds, info);
break;
case DSA_NOTIFIER_TAG_PROTO_CONNECT:
err = dsa_switch_connect_tag_proto(ds, info);
break;
case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
err = dsa_switch_disconnect_tag_proto(ds, info);
break;
case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
err = dsa_switch_tag_8021q_vlan_add(ds, info);
break;
case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
err = dsa_switch_tag_8021q_vlan_del(ds, info);
break;
case DSA_NOTIFIER_MASTER_STATE_CHANGE:
err = dsa_switch_master_state_change(ds, info);
break;
default:
err = -EOPNOTSUPP;
break;
}
if (err)
dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
event, err);
return notifier_from_errno(err);
}
/**
* dsa_tree_notify - Execute code for all switches in a DSA switch tree.
* @dst: collection of struct dsa_switch devices to notify.
* @e: event, must be of type DSA_NOTIFIER_*
* @v: event-specific value.
*
* Given a struct dsa_switch_tree, this can be used to run a function once for
* each member DSA switch. The only other way of traversing the tree is
* through its ports list, which does not uniquely list the switches.
*/
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
struct raw_notifier_head *nh = &dst->nh;
int err;
err = raw_notifier_call_chain(nh, e, v);
return notifier_to_errno(err);
}
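/* Sketch of how a caller (e.g. the port layer) typically emits an event;
* field names follow the notifier structs used above:
*
*	struct dsa_notifier_ageing_time_info info = {
*		.ageing_time = ageing_time,
*	};
*
*	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_AGEING_TIME, &info);
*/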
/**
* dsa_broadcast - Notify all DSA trees in the system.
* @e: event, must be of type DSA_NOTIFIER_*
* @v: event-specific value.
*
* Can be used to notify the switching fabric of events such as cross-chip
* bridging between disjoint trees (such as islands of tagger-compatible
* switches bridged by an incompatible middle switch).
*
* WARNING: this function is not reliable during probe time, because probing
* between trees is asynchronous and not all DSA trees might have probed.
*/
int dsa_broadcast(unsigned long e, void *v)
{
struct dsa_switch_tree *dst;
int err = 0;
list_for_each_entry(dst, &dsa_tree_list, list) {
err = dsa_tree_notify(dst, e, v);
if (err)
break;
}
return err;
}
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
ds->nb.notifier_call = dsa_switch_event;
return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
int err;
err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
if (err)
dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
| linux-master | net/dsa/switch.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Broadcom tag support
*
* Copyright (C) 2014 Broadcom Corporation
*/
#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "tag.h"
#define BRCM_NAME "brcm"
#define BRCM_LEGACY_NAME "brcm-legacy"
#define BRCM_PREPEND_NAME "brcm-prepend"
/* Legacy Broadcom tag (6 bytes) */
#define BRCM_LEG_TAG_LEN 6
/* Type fields */
/* 1st byte in the tag */
#define BRCM_LEG_TYPE_HI 0x88
/* 2nd byte in the tag */
#define BRCM_LEG_TYPE_LO 0x74
/* Tag fields */
/* 3rd byte in the tag */
#define BRCM_LEG_UNICAST (0 << 5)
#define BRCM_LEG_MULTICAST (1 << 5)
#define BRCM_LEG_EGRESS (2 << 5)
#define BRCM_LEG_INGRESS (3 << 5)
/* 6th byte in the tag */
#define BRCM_LEG_PORT_ID (0xf)
/* Newer Broadcom tag (4 bytes) */
#define BRCM_TAG_LEN 4
/* The tag is constructed and deconstructed using byte-by-byte access
* because it is placed after the MAC source address, which leaves it
* not 4-byte aligned and can therefore cause unaligned accesses on
* most systems where this is used.
*/
/* Ingress and egress opcodes */
#define BRCM_OPCODE_SHIFT 5
#define BRCM_OPCODE_MASK 0x7
/* Ingress fields */
/* 1st byte in the tag */
#define BRCM_IG_TC_SHIFT 2
#define BRCM_IG_TC_MASK 0x7
/* 2nd byte in the tag */
#define BRCM_IG_TE_MASK 0x3
#define BRCM_IG_TS_SHIFT 7
/* 3rd byte in the tag */
#define BRCM_IG_DSTMAP2_MASK 1
#define BRCM_IG_DSTMAP1_MASK 0xff
/* Egress fields */
/* 2nd byte in the tag */
#define BRCM_EG_CID_MASK 0xff
/* 3rd byte in the tag */
#define BRCM_EG_RC_MASK 0xff
#define BRCM_EG_RC_RSVD (3 << 6)
#define BRCM_EG_RC_EXCEPTION (1 << 5)
#define BRCM_EG_RC_PROT_SNOOP (1 << 4)
#define BRCM_EG_RC_PROT_TERM (1 << 3)
#define BRCM_EG_RC_SWITCH (1 << 2)
#define BRCM_EG_RC_MAC_LEARN (1 << 1)
#define BRCM_EG_RC_MIRROR (1 << 0)
#define BRCM_EG_TC_SHIFT 5
#define BRCM_EG_TC_MASK 0x7
#define BRCM_EG_PID_MASK 0x1f
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) || \
IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
struct net_device *dev,
unsigned int offset)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
u16 queue = skb_get_queue_mapping(skb);
u8 *brcm_tag;
/* The Ethernet switch we are interfaced with needs packets to be at
* least 64 bytes (including FCS) otherwise they will be discarded when
* they enter the switch port logic. When Broadcom tags are enabled, we
* need to make sure that packets are at least 68 bytes
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*
* Let dsa_slave_xmit() free the SKB
*/
if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
return NULL;
skb_push(skb, BRCM_TAG_LEN);
if (offset)
dsa_alloc_etype_header(skb, BRCM_TAG_LEN);
brcm_tag = skb->data + offset;
/* Set the ingress opcode and traffic class; tag enforcement is
* deprecated.
*/
brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT) |
((queue & BRCM_IG_TC_MASK) << BRCM_IG_TC_SHIFT);
brcm_tag[1] = 0;
brcm_tag[2] = 0;
if (dp->index == 8)
brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
brcm_tag[3] = (1 << dp->index) & BRCM_IG_DSTMAP1_MASK;
/* Now tell the master network device about the desired output queue
* as well
*/
skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue));
return skb;
}
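/* Worked example (hypothetical values): for a user port with index 1 and
* queue mapping 0, the code above produces the tag bytes 0x20 0x00 0x00 0x02:
* opcode 1 in the top bits of byte 0, no traffic class, and bit 1 set in the
* DSTMAP1 byte to steer the frame to port 1.
*/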
/* Frames with this tag have one of these two layouts:
* -----------------------------------
* | MAC DA | MAC SA | 4b tag | Type | DSA_TAG_PROTO_BRCM
* -----------------------------------
* -----------------------------------
* | 4b tag | MAC DA | MAC SA | Type | DSA_TAG_PROTO_BRCM_PREPEND
* -----------------------------------
* In both cases, at receive time, skb->data points 2 bytes before the actual
* Ethernet type field and there is a 4-byte offset between skb->data and the
* start of the payload, so the same low-level receive function can be used.
*/
static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
struct net_device *dev,
unsigned int offset)
{
int source_port;
u8 *brcm_tag;
if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN)))
return NULL;
brcm_tag = skb->data - offset;
/* The opcode should never be different from 0b000 */
if (unlikely((brcm_tag[0] >> BRCM_OPCODE_SHIFT) & BRCM_OPCODE_MASK))
return NULL;
/* We should never see a reserved reason code without knowing how to
* handle it
*/
if (unlikely(brcm_tag[2] & BRCM_EG_RC_RSVD))
return NULL;
/* Locate which port this is coming from */
source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
if (!skb->dev)
return NULL;
/* Remove Broadcom tag and update checksum */
skb_pull_rcsum(skb, BRCM_TAG_LEN);
dsa_default_offload_fwd_mark(skb);
return skb;
}
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
/* Build the tag after the MAC Source Address */
return brcm_tag_xmit_ll(skb, dev, 2 * ETH_ALEN);
}
static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev)
{
struct sk_buff *nskb;
/* skb->data points to the EtherType, the tag is right before it */
nskb = brcm_tag_rcv_ll(skb, dev, 2);
if (!nskb)
return nskb;
dsa_strip_etype_header(skb, BRCM_TAG_LEN);
return nskb;
}
static const struct dsa_device_ops brcm_netdev_ops = {
.name = BRCM_NAME,
.proto = DSA_TAG_PROTO_BRCM,
.xmit = brcm_tag_xmit,
.rcv = brcm_tag_rcv,
.needed_headroom = BRCM_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM, BRCM_NAME);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *brcm_tag;
/* The Ethernet switch we are interfaced with needs packets to be at
* least 64 bytes (including FCS) otherwise they will be discarded when
* they enter the switch port logic. When Broadcom tags are enabled, we
* need to make sure that packets are at least 70 bytes
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*
* Let dsa_slave_xmit() free the SKB
*/
if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false))
return NULL;
skb_push(skb, BRCM_LEG_TAG_LEN);
dsa_alloc_etype_header(skb, BRCM_LEG_TAG_LEN);
brcm_tag = skb->data + 2 * ETH_ALEN;
/* Broadcom tag type */
brcm_tag[0] = BRCM_LEG_TYPE_HI;
brcm_tag[1] = BRCM_LEG_TYPE_LO;
/* Broadcom tag value */
brcm_tag[2] = BRCM_LEG_EGRESS;
brcm_tag[3] = 0;
brcm_tag[4] = 0;
brcm_tag[5] = dp->index & BRCM_LEG_PORT_ID;
return skb;
}
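/* Worked example (hypothetical port): for a user port with index 5, the
* 6-byte legacy tag built above is 0x88 0x74 0x40 0x00 0x00 0x05, i.e. the
* type bytes, the EGRESS opcode in the third byte and the port ID in the
* last byte.
*/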
static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
struct net_device *dev)
{
int len = BRCM_LEG_TAG_LEN;
int source_port;
u8 *brcm_tag;
if (unlikely(!pskb_may_pull(skb, BRCM_LEG_PORT_ID)))
return NULL;
brcm_tag = dsa_etype_header_pos_rx(skb);
source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
if (!skb->dev)
return NULL;
/* VLAN tag is added by BCM63xx internal switch */
if (netdev_uses_dsa(skb->dev))
len += VLAN_HLEN;
/* Remove Broadcom tag and update checksum */
skb_pull_rcsum(skb, len);
dsa_default_offload_fwd_mark(skb);
dsa_strip_etype_header(skb, len);
return skb;
}
static const struct dsa_device_ops brcm_legacy_netdev_ops = {
.name = BRCM_LEGACY_NAME,
.proto = DSA_TAG_PROTO_BRCM_LEGACY,
.xmit = brcm_leg_tag_xmit,
.rcv = brcm_leg_tag_rcv,
.needed_headroom = BRCM_LEG_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb,
struct net_device *dev)
{
/* tag is prepended to the packet */
return brcm_tag_xmit_ll(skb, dev, 0);
}
static struct sk_buff *brcm_tag_rcv_prepend(struct sk_buff *skb,
struct net_device *dev)
{
/* tag is prepended to the packet */
return brcm_tag_rcv_ll(skb, dev, ETH_HLEN);
}
static const struct dsa_device_ops brcm_prepend_netdev_ops = {
.name = BRCM_PREPEND_NAME,
.proto = DSA_TAG_PROTO_BRCM_PREPEND,
.xmit = brcm_tag_xmit_prepend,
.rcv = brcm_tag_rcv_prepend,
.needed_headroom = BRCM_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_prepend_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_PREPEND, BRCM_PREPEND_NAME);
#endif
static struct dsa_tag_driver *dsa_tag_driver_array[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
&DSA_TAG_DRIVER_NAME(brcm_netdev_ops),
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
&DSA_TAG_DRIVER_NAME(brcm_legacy_netdev_ops),
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
&DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops),
#endif
};
module_dsa_tag_drivers(dsa_tag_driver_array);
MODULE_LICENSE("GPL");
| linux-master | net/dsa/tag_brcm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020-2021 NXP
*
* An implementation of the software-defined tag_8021q.c tagger format, which
* also preserves full functionality under a vlan_filtering bridge. It does
* this by using the TCAM engines for:
* - pushing the RX VLAN as a second, outer tag, on egress towards the CPU port
* - redirecting towards the correct front port based on TX VLAN and popping
* that on egress
*/
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include "tag.h"
#include "tag_8021q.h"
#define OCELOT_8021Q_NAME "ocelot-8021q"
struct ocelot_8021q_tagger_private {
struct ocelot_8021q_tagger_data data; /* Must be first */
struct kthread_worker *xmit_worker;
};
static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
struct sk_buff *skb)
{
struct ocelot_8021q_tagger_private *priv = dp->ds->tagger_data;
struct ocelot_8021q_tagger_data *data = &priv->data;
void (*xmit_work_fn)(struct kthread_work *work);
struct felix_deferred_xmit_work *xmit_work;
struct kthread_worker *xmit_worker;
xmit_work_fn = data->xmit_work_fn;
xmit_worker = priv->xmit_worker;
if (!xmit_work_fn || !xmit_worker)
return NULL;
/* PTP over IP packets need UDP checksumming. We may have inherited
* NETIF_F_HW_CSUM from the DSA master, but these packets are not sent
* through the DSA master, so calculate the checksum here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
if (!xmit_work)
return NULL;
/* Calls felix_port_deferred_xmit in felix.c */
kthread_init_work(&xmit_work->work, xmit_work_fn);
/* Increase refcount so the kfree_skb in dsa_slave_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
xmit_work->skb = skb_get(skb);
kthread_queue_work(xmit_worker, &xmit_work->work);
return NULL;
}
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
struct ethhdr *hdr = eth_hdr(skb);
if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
return ocelot_defer_xmit(dp, skb);
return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}
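/* The VLAN TCI built above is simply (pcp << VLAN_PRIO_SHIFT) | tx_vid, so a
* frame on traffic class 7, for example, carries 0xE000 OR'ed with the port's
* standalone VID; link-local and PTP frames bypass this path entirely and are
* deferred to the tagger's kthread instead.
*/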
static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
int src_port, switch_id;
dsa_8021q_rcv(skb, &src_port, &switch_id, NULL);
skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
if (!skb->dev)
return NULL;
dsa_default_offload_fwd_mark(skb);
return skb;
}
static void ocelot_disconnect(struct dsa_switch *ds)
{
struct ocelot_8021q_tagger_private *priv = ds->tagger_data;
kthread_destroy_worker(priv->xmit_worker);
kfree(priv);
ds->tagger_data = NULL;
}
static int ocelot_connect(struct dsa_switch *ds)
{
struct ocelot_8021q_tagger_private *priv;
int err;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
if (IS_ERR(priv->xmit_worker)) {
err = PTR_ERR(priv->xmit_worker);
kfree(priv);
return err;
}
ds->tagger_data = priv;
return 0;
}
static const struct dsa_device_ops ocelot_8021q_netdev_ops = {
.name = OCELOT_8021Q_NAME,
.proto = DSA_TAG_PROTO_OCELOT_8021Q,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
.connect = ocelot_connect,
.disconnect = ocelot_disconnect,
.needed_headroom = VLAN_HLEN,
.promisc_on_master = true,
};
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OCELOT_8021Q, OCELOT_8021Q_NAME);
module_dsa_tag_driver(ocelot_8021q_netdev_ops);
| linux-master | net/dsa/tag_ocelot_8021q.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* net/dsa/tag_trailer.c - Trailer tag format handling
* Copyright (c) 2008-2009 Marvell Semiconductor
*/
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "tag.h"
#define TRAILER_NAME "trailer"
static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 4);
trailer[0] = 0x80;
trailer[1] = 1 << dp->index;
trailer[2] = 0x10;
trailer[3] = 0x00;
return skb;
}
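/* Worked example (hypothetical port): for a user port with index 2, the four
* trailer bytes appended above are 0x80 0x04 0x10 0x00, with bit 2 of the
* second byte selecting the egress port.
*/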
static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev)
{
u8 *trailer;
int source_port;
if (skb_linearize(skb))
return NULL;
trailer = skb_tail_pointer(skb) - 4;
if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
(trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
return NULL;
source_port = trailer[1] & 7;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
if (!skb->dev)
return NULL;
if (pskb_trim_rcsum(skb, skb->len - 4))
return NULL;
return skb;
}
static const struct dsa_device_ops trailer_netdev_ops = {
.name = TRAILER_NAME,
.proto = DSA_TAG_PROTO_TRAILER,
.xmit = trailer_xmit,
.rcv = trailer_rcv,
.needed_tailroom = 4,
};
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_TRAILER, TRAILER_NAME);
module_dsa_tag_driver(trailer_netdev_ops);
| linux-master | net/dsa/tag_trailer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 Pengutronix, Oleksij Rempel <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include "tag.h"
#define AR9331_NAME "ar9331"
#define AR9331_HDR_LEN 2
#define AR9331_HDR_VERSION 1
#define AR9331_HDR_VERSION_MASK GENMASK(15, 14)
#define AR9331_HDR_PRIORITY_MASK GENMASK(13, 12)
#define AR9331_HDR_TYPE_MASK GENMASK(10, 8)
#define AR9331_HDR_BROADCAST BIT(7)
#define AR9331_HDR_FROM_CPU BIT(6)
/* AR9331_HDR_RESERVED - not used, or may be a version field.
* According to the AR8216 doc it should be 0b10. On AR9331 it is 0b11 on the
* RX path and must be set to 0b11 to make it work.
*/
#define AR9331_HDR_RESERVED_MASK GENMASK(5, 4)
#define AR9331_HDR_PORT_NUM_MASK GENMASK(3, 0)
static struct sk_buff *ar9331_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
__le16 *phdr;
u16 hdr;
phdr = skb_push(skb, AR9331_HDR_LEN);
hdr = FIELD_PREP(AR9331_HDR_VERSION_MASK, AR9331_HDR_VERSION);
hdr |= AR9331_HDR_FROM_CPU | dp->index;
/* 0b10 for AR8216 and 0b11 for AR9331 */
hdr |= AR9331_HDR_RESERVED_MASK;
phdr[0] = cpu_to_le16(hdr);
return skb;
}
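/* Worked example (hypothetical port): for a user port with index 3, the
* 16-bit header built above is 0x4073 (version 1, FROM_CPU, reserved bits
* 0b11, port 3), stored little-endian on the wire as 0x73 0x40.
*/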
static struct sk_buff *ar9331_tag_rcv(struct sk_buff *skb,
struct net_device *ndev)
{
u8 ver, port;
u16 hdr;
if (unlikely(!pskb_may_pull(skb, AR9331_HDR_LEN)))
return NULL;
hdr = le16_to_cpu(*(__le16 *)skb_mac_header(skb));
ver = FIELD_GET(AR9331_HDR_VERSION_MASK, hdr);
if (unlikely(ver != AR9331_HDR_VERSION)) {
netdev_warn_once(ndev, "%s:%i wrong header version 0x%2x\n",
__func__, __LINE__, hdr);
return NULL;
}
if (unlikely(hdr & AR9331_HDR_FROM_CPU)) {
netdev_warn_once(ndev, "%s:%i packet should not be from cpu 0x%2x\n",
__func__, __LINE__, hdr);
return NULL;
}
skb_pull_rcsum(skb, AR9331_HDR_LEN);
/* Get source port information */
port = FIELD_GET(AR9331_HDR_PORT_NUM_MASK, hdr);
skb->dev = dsa_master_find_slave(ndev, 0, port);
if (!skb->dev)
return NULL;
return skb;
}
static const struct dsa_device_ops ar9331_netdev_ops = {
.name = AR9331_NAME,
.proto = DSA_TAG_PROTO_AR9331,
.xmit = ar9331_tag_xmit,
.rcv = ar9331_tag_rcv,
.needed_headroom = AR9331_HDR_LEN,
};
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_AR9331, AR9331_NAME);
module_dsa_tag_driver(ar9331_netdev_ops);
| linux-master | net/dsa/tag_ar9331.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Handler for Realtek 4 byte DSA switch tags
* Currently only supports protocol "A" found in RTL8366RB
* Copyright (c) 2020 Linus Walleij <[email protected]>
*
* This "proprietary tag" header looks like so:
*
* -------------------------------------------------
* | MAC DA | MAC SA | 0x8899 | 2 bytes tag | Type |
* -------------------------------------------------
*
* The 2-byte tag forms a 16-bit big-endian word. The exact
* meaning has been guessed from packet dumps of ingress
* frames.
*/
#include <linux/etherdevice.h>
#include <linux/bits.h>
#include "tag.h"
#define RTL4_A_NAME "rtl4a"
#define RTL4_A_HDR_LEN 4
#define RTL4_A_ETHERTYPE 0x8899
#define RTL4_A_PROTOCOL_SHIFT 12
/*
* 0x1 = Realtek Remote Control protocol (RRCP)
* 0x2/0x3 seem to be used for loopback testing
* 0x9 = RTL8306 DSA protocol
* 0xa = RTL8366RB DSA protocol
*/
#define RTL4_A_PROTOCOL_RTL8366RB 0xa
static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
__be16 *p;
u8 *tag;
u16 out;
/* Pad out to at least 60 bytes */
if (unlikely(__skb_put_padto(skb, ETH_ZLEN, false)))
return NULL;
netdev_dbg(dev, "add realtek tag to package to port %d\n",
dp->index);
skb_push(skb, RTL4_A_HDR_LEN);
dsa_alloc_etype_header(skb, RTL4_A_HDR_LEN);
tag = dsa_etype_header_pos_tx(skb);
/* Set Ethertype */
p = (__be16 *)tag;
*p = htons(RTL4_A_ETHERTYPE);
out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT);
/* The lower bits indicate the port number */
out |= BIT(dp->index);
p = (__be16 *)(tag + 2);
*p = htons(out);
return skb;
}
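/* Worked example (hypothetical port): for a user port with index 4, the
* 4-byte tag built above is 0x88 0x99 0xa0 0x10: the Realtek EtherType,
* protocol 0xa in the upper nibble and BIT(4) selecting the egress port.
*/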
static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb,
struct net_device *dev)
{
u16 protport;
__be16 *p;
u16 etype;
u8 *tag;
u8 prot;
u8 port;
if (unlikely(!pskb_may_pull(skb, RTL4_A_HDR_LEN)))
return NULL;
tag = dsa_etype_header_pos_rx(skb);
p = (__be16 *)tag;
etype = ntohs(*p);
if (etype != RTL4_A_ETHERTYPE) {
/* Not custom, just pass through */
netdev_dbg(dev, "non-realtek ethertype 0x%04x\n", etype);
return skb;
}
p = (__be16 *)(tag + 2);
protport = ntohs(*p);
/* The 4 upper bits are the protocol */
prot = (protport >> RTL4_A_PROTOCOL_SHIFT) & 0x0f;
if (prot != RTL4_A_PROTOCOL_RTL8366RB) {
netdev_err(dev, "unknown realtek protocol 0x%01x\n", prot);
return NULL;
}
port = protport & 0xff;
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev) {
netdev_dbg(dev, "could not find slave for port %d\n", port);
return NULL;
}
/* Remove RTL4 tag and recalculate checksum */
skb_pull_rcsum(skb, RTL4_A_HDR_LEN);
dsa_strip_etype_header(skb, RTL4_A_HDR_LEN);
dsa_default_offload_fwd_mark(skb);
return skb;
}
static const struct dsa_device_ops rtl4a_netdev_ops = {
.name = RTL4_A_NAME,
.proto = DSA_TAG_PROTO_RTL4_A,
.xmit = rtl4a_tag_xmit,
.rcv = rtl4a_tag_rcv,
.needed_headroom = RTL4_A_HDR_LEN,
};
module_dsa_tag_driver(rtl4a_netdev_ops);
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL4_A, RTL4_A_NAME);
| linux-master | net/dsa/tag_rtl4_a.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Schneider Electric
*
* Clément Léger <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <net/dsa.h>
#include "tag.h"
/* To define the outgoing port and to discover the incoming port a TAG is
* inserted after Src MAC :
*
* Dest MAC Src MAC TAG Type
* ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 5 6 7 8 | 1 2 |...
* |<--------------->|
*
* See struct a5psw_tag for layout
*/
#define A5PSW_NAME "a5psw"
#define ETH_P_DSA_A5PSW 0xE001
#define A5PSW_TAG_LEN 8
#define A5PSW_CTRL_DATA_FORCE_FORWARD BIT(0)
/* This is used both for the xmit tag and for rcv tagging */
#define A5PSW_CTRL_DATA_PORT GENMASK(3, 0)
struct a5psw_tag {
__be16 ctrl_tag;
__be16 ctrl_data;
__be16 ctrl_data2_hi;
__be16 ctrl_data2_lo;
};
static struct sk_buff *a5psw_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct a5psw_tag *ptag;
u32 data2_val;
BUILD_BUG_ON(sizeof(*ptag) != A5PSW_TAG_LEN);
/* The Ethernet switch we are interfaced with needs packets to be at
* least 60 bytes otherwise they will be discarded when they enter the
* switch port logic.
*/
if (__skb_put_padto(skb, ETH_ZLEN, false))
return NULL;
/* provide 'A5PSW_TAG_LEN' bytes additional space */
skb_push(skb, A5PSW_TAG_LEN);
/* make room between MACs and Ether-Type to insert tag */
dsa_alloc_etype_header(skb, A5PSW_TAG_LEN);
ptag = dsa_etype_header_pos_tx(skb);
data2_val = FIELD_PREP(A5PSW_CTRL_DATA_PORT, BIT(dp->index));
ptag->ctrl_tag = htons(ETH_P_DSA_A5PSW);
ptag->ctrl_data = htons(A5PSW_CTRL_DATA_FORCE_FORWARD);
ptag->ctrl_data2_lo = htons(data2_val);
ptag->ctrl_data2_hi = 0;
return skb;
}
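/* Worked example (hypothetical port): for a user port with index 2, the
* 8-byte tag built above is e0 01 00 01 00 00 00 04 on the wire: the A5PSW
* EtherType, the FORCE_FORWARD control bit and BIT(2) in the port field of
* ctrl_data2.
*/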
static struct sk_buff *a5psw_tag_rcv(struct sk_buff *skb,
struct net_device *dev)
{
struct a5psw_tag *tag;
int port;
if (unlikely(!pskb_may_pull(skb, A5PSW_TAG_LEN))) {
dev_warn_ratelimited(&dev->dev,
"Dropping packet, cannot pull\n");
return NULL;
}
tag = dsa_etype_header_pos_rx(skb);
if (tag->ctrl_tag != htons(ETH_P_DSA_A5PSW)) {
dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid TAG marker\n");
return NULL;
}
port = FIELD_GET(A5PSW_CTRL_DATA_PORT, ntohs(tag->ctrl_data));
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev)
return NULL;
skb_pull_rcsum(skb, A5PSW_TAG_LEN);
dsa_strip_etype_header(skb, A5PSW_TAG_LEN);
dsa_default_offload_fwd_mark(skb);
return skb;
}
static const struct dsa_device_ops a5psw_netdev_ops = {
.name = A5PSW_NAME,
.proto = DSA_TAG_PROTO_RZN1_A5PSW,
.xmit = a5psw_tag_xmit,
.rcv = a5psw_tag_rcv,
.needed_headroom = A5PSW_TAG_LEN,
};
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_A5PSW, A5PSW_NAME);
module_dsa_tag_driver(a5psw_netdev_ops);
| linux-master | net/dsa/tag_rzn1_a5psw.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Regular and Ethertype DSA tagging
* Copyright (c) 2008-2009 Marvell Semiconductor
*
* Regular DSA
* -----------
* For untagged (in 802.1Q terms) packets, the switch will splice in
* the tag between the SA and the ethertype of the original
* packet. Tagged frames will instead have their outermost .1Q tag
* converted to a DSA tag. It expects the same layout when receiving
* packets from the CPU.
*
* Example:
*
* .----.----.----.---------
* Pu: | DA | SA | ET | Payload ...
* '----'----'----'---------
* 6 6 2 N
* .----.----.--------.-----.----.---------
* Pt: | DA | SA | 0x8100 | TCI | ET | Payload ...
* '----'----'--------'-----'----'---------
* 6 6 2 2 2 N
* .----.----.-----.----.---------
* Pd: | DA | SA | DSA | ET | Payload ...
* '----'----'-----'----'---------
* 6 6 4 2 N
*
* No matter if a packet is received untagged (Pu) or tagged (Pt),
* they will both have the same layout (Pd) when they are sent to the
* CPU. This is done by ignoring 802.3, replacing the ethertype field
* with more metadata, among which is a bit to signal if the original
* packet was tagged or not.
*
* Ethertype DSA
* -------------
* Uses the exact same tag format as regular DSA, but also includes a
* proper ethertype field (which the mv88e6xxx driver sets to
* ETH_P_EDSA/0xdada) followed by two zero bytes:
*
* .----.----.--------.--------.-----.----.---------
* | DA | SA | 0xdada | 0x0000 | DSA | ET | Payload ...
* '----'----'--------'--------'-----'----'---------
* 6 6 2 2 4 2 N
*/
#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "tag.h"
#define DSA_NAME "dsa"
#define EDSA_NAME "edsa"
#define DSA_HLEN 4
/**
* enum dsa_cmd - DSA Command
* @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
* the CPU port. This is needed to implement control protocols,
* e.g. STP and LLDP, that must not allow those control packets to
* be switched according to the normal rules.
* @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
* port, ignoring all the barriers that the switch normally
* enforces (VLANs, STP port states etc.). No source address
* learning takes place. "sudo send packet"
* @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched some
* user configured ingress or egress monitor criteria. These are
* forwarded by the switch tree to the user configured ingress or
* egress monitor port, which can be set to the CPU port or a
* regular port. If the destination is a regular port, the tag
* will be removed before egressing the port. If the destination
* is the CPU port, the tag will not be removed.
* @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
* through the switch tree, including the flows that are directed
* towards the CPU. Its device/port tuple encodes the original
* source port on which the packet ingressed. It can also be used
* on transmit by the CPU to defer the forwarding decision to the
* hardware, based on the current config of PVT/VTU/ATU
* etc. Source address learning takes places if enabled on the
* receiving DSA/CPU port.
*/
enum dsa_cmd {
DSA_CMD_TO_CPU = 0,
DSA_CMD_FROM_CPU = 1,
DSA_CMD_TO_SNIFFER = 2,
DSA_CMD_FORWARD = 3
};
/**
* enum dsa_code - TO_CPU Code
*
* @DSA_CODE_MGMT_TRAP: DA was classified as a management
* address. Typical examples include STP BPDUs and LLDP.
* @DSA_CODE_FRAME2REG: Response to a "remote management" request.
* @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
* @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
* the device. Typical examples are matching on DA/SA/VID and DHCP
* snooping.
* @DSA_CODE_ARP_MIRROR: The name says it all really.
* @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
* particular policy was set to trigger a mirror instead of a
* trap.
* @DSA_CODE_RESERVED_6: Unused on all devices up to at least 6393X.
* @DSA_CODE_RESERVED_7: Unused on all devices up to at least 6393X.
*
* A 3-bit code is used to relay why a particular frame was sent to
* the CPU. We only use this to determine if the packet was mirrored
* or trapped, i.e. whether the packet has been forwarded by hardware
* or not.
*
* This is the superset of all possible codes. Any particular device
* may only implement a subset.
*/
enum dsa_code {
DSA_CODE_MGMT_TRAP = 0,
DSA_CODE_FRAME2REG = 1,
DSA_CODE_IGMP_MLD_TRAP = 2,
DSA_CODE_POLICY_TRAP = 3,
DSA_CODE_ARP_MIRROR = 4,
DSA_CODE_POLICY_MIRROR = 5,
DSA_CODE_RESERVED_6 = 6,
DSA_CODE_RESERVED_7 = 7
};
static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
u8 extra)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct net_device *br_dev;
u8 tag_dev, tag_port;
enum dsa_cmd cmd;
u8 *dsa_header;
if (skb->offload_fwd_mark) {
unsigned int bridge_num = dsa_port_bridge_num_get(dp);
struct dsa_switch_tree *dst = dp->ds->dst;
cmd = DSA_CMD_FORWARD;
/* When offloading forwarding for a bridge, inject FORWARD
* packets on behalf of a virtual switch device with an index
* past the physical switches.
*/
tag_dev = dst->last_switch + bridge_num;
tag_port = 0;
} else {
cmd = DSA_CMD_FROM_CPU;
tag_dev = dp->ds->index;
tag_port = dp->index;
}
br_dev = dsa_port_bridge_dev_get(dp);
/* If frame is already 802.1Q tagged, we can convert it to a DSA
* tag (avoiding a memmove), but only if the port is standalone
* (in which case we always send FROM_CPU) or if the port's
* bridge has VLAN filtering enabled (in which case the CPU port
* will be a member of the VLAN).
*/
if (skb->protocol == htons(ETH_P_8021Q) &&
(!br_dev || br_vlan_enabled(br_dev))) {
if (extra) {
skb_push(skb, extra);
dsa_alloc_etype_header(skb, extra);
}
/* Construct tagged DSA tag from 802.1Q tag. */
dsa_header = dsa_etype_header_pos_tx(skb) + extra;
dsa_header[0] = (cmd << 6) | 0x20 | tag_dev;
dsa_header[1] = tag_port << 3;
/* Move CFI field from byte 2 to byte 1. */
if (dsa_header[2] & 0x10) {
dsa_header[1] |= 0x01;
dsa_header[2] &= ~0x10;
}
} else {
u16 vid;
vid = br_dev ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
skb_push(skb, DSA_HLEN + extra);
dsa_alloc_etype_header(skb, DSA_HLEN + extra);
/* Construct DSA header from untagged frame. */
dsa_header = dsa_etype_header_pos_tx(skb) + extra;
dsa_header[0] = (cmd << 6) | tag_dev;
dsa_header[1] = tag_port << 3;
dsa_header[2] = vid >> 8;
dsa_header[3] = vid & 0xff;
}
return skb;
}
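/* Worked example (hypothetical values): an untagged frame sent from the CPU
* out of port 3 of switch 0 in standalone mode gets the DSA header
* 0x40 0x18 (vid >> 8) (vid & 0xff): FROM_CPU in the top two bits of byte 0,
* the target device in its low bits, the port in the upper bits of byte 1,
* and the standalone VID in the last two bytes.
*/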
static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
u8 extra)
{
bool trap = false, trunk = false;
int source_device, source_port;
enum dsa_code code;
enum dsa_cmd cmd;
u8 *dsa_header;
/* The ethertype field is part of the DSA header. */
dsa_header = dsa_etype_header_pos_rx(skb);
cmd = dsa_header[0] >> 6;
switch (cmd) {
case DSA_CMD_FORWARD:
trunk = !!(dsa_header[1] & 4);
break;
case DSA_CMD_TO_CPU:
code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);
switch (code) {
case DSA_CODE_FRAME2REG:
/* Remote management is not implemented yet,
* drop.
*/
return NULL;
case DSA_CODE_ARP_MIRROR:
case DSA_CODE_POLICY_MIRROR:
/* Mark mirrored packets to notify any upper
* device (like a bridge) that forwarding has
* already been done by hardware.
*/
break;
case DSA_CODE_MGMT_TRAP:
case DSA_CODE_IGMP_MLD_TRAP:
case DSA_CODE_POLICY_TRAP:
/* Traps have, by definition, not been
* forwarded by hardware, so don't mark them.
*/
trap = true;
break;
default:
/* Reserved code, this could be anything. Drop
* seems like the safest option.
*/
return NULL;
}
break;
default:
return NULL;
}
source_device = dsa_header[0] & 0x1f;
source_port = (dsa_header[1] >> 3) & 0x1f;
if (trunk) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_lag *lag;
/* The exact source port is not available in the tag,
* so we inject the frame directly on the upper
* team/bond.
*/
lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
skb->dev = lag ? lag->dev : NULL;
} else {
skb->dev = dsa_master_find_slave(dev, source_device,
source_port);
}
if (!skb->dev)
return NULL;
/* When using LAG offload, skb->dev is not a DSA slave interface,
* so we cannot call dsa_default_offload_fwd_mark and we need to
* special-case it.
*/
if (trunk)
skb->offload_fwd_mark = true;
else if (!trap)
dsa_default_offload_fwd_mark(skb);
/* If the 'tagged' bit is set, convert the DSA tag to an 802.1Q
* tag and delete the ethertype (extra) if applicable. If the
* 'tagged' bit is cleared, delete the DSA tag and the ethertype
* if applicable.
*/
if (dsa_header[0] & 0x20) {
u8 new_header[4];
/* Insert 802.1Q ethertype and copy the VLAN-related
* fields, but clear the bit that will hold CFI (since
* DSA uses that bit location for another purpose).
*/
new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
new_header[1] = ETH_P_8021Q & 0xff;
new_header[2] = dsa_header[2] & ~0x10;
new_header[3] = dsa_header[3];
/* Move CFI bit from its place in the DSA header to
* its 802.1Q-designated place.
*/
if (dsa_header[1] & 0x01)
new_header[2] |= 0x10;
/* Update packet checksum if skb is CHECKSUM_COMPLETE. */
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__wsum c = skb->csum;
c = csum_add(c, csum_partial(new_header + 2, 2, 0));
c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
skb->csum = c;
}
memcpy(dsa_header, new_header, DSA_HLEN);
if (extra)
dsa_strip_etype_header(skb, extra);
} else {
skb_pull_rcsum(skb, DSA_HLEN);
dsa_strip_etype_header(skb, DSA_HLEN + extra);
}
return skb;
}
#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
return dsa_xmit_ll(skb, dev, 0);
}
static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
return NULL;
return dsa_rcv_ll(skb, dev, 0);
}
static const struct dsa_device_ops dsa_netdev_ops = {
.name = DSA_NAME,
.proto = DSA_TAG_PROTO_DSA,
.xmit = dsa_xmit,
.rcv = dsa_rcv,
.needed_headroom = DSA_HLEN,
};
DSA_TAG_DRIVER(dsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA, DSA_NAME);
#endif /* CONFIG_NET_DSA_TAG_DSA */
#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
#define EDSA_HLEN 8
static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
u8 *edsa_header;
skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
if (!skb)
return NULL;
edsa_header = dsa_etype_header_pos_tx(skb);
edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
edsa_header[1] = ETH_P_EDSA & 0xff;
edsa_header[2] = 0x00;
edsa_header[3] = 0x00;
return skb;
}
static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
return NULL;
skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);
return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
}
static const struct dsa_device_ops edsa_netdev_ops = {
.name = EDSA_NAME,
.proto = DSA_TAG_PROTO_EDSA,
.xmit = edsa_xmit,
.rcv = edsa_rcv,
.needed_headroom = EDSA_HLEN,
};
DSA_TAG_DRIVER(edsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA, EDSA_NAME);
#endif /* CONFIG_NET_DSA_TAG_EDSA */
static struct dsa_tag_driver *dsa_tag_drivers[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
&DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
&DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
#endif
};
module_dsa_tag_drivers(dsa_tag_drivers);
MODULE_LICENSE("GPL");
| linux-master | net/dsa/tag_dsa.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Handler for Realtek 8 byte switch tags
*
* Copyright (C) 2021 Alvin Šipraga <[email protected]>
*
* NOTE: Currently only supports protocol "4" found in the RTL8365MB, hence
* named tag_rtl8_4.
*
* This tag has the following format:
*
* 0 7|8 15
* |-----------------------------------+-----------------------------------|---
* | (16-bit) | ^
* | Realtek EtherType [0x8899] | |
* |-----------------------------------+-----------------------------------| 8
* | (8-bit) | (8-bit) |
* | Protocol [0x04] | REASON | b
* |-----------------------------------+-----------------------------------| y
* | (1) | (1) | (2) | (1) | (3) | (1) | (1) | (1) | (5) | t
* | FID_EN | X | FID | PRI_EN | PRI | KEEP | X | LEARN_DIS | X | e
* |-----------------------------------+-----------------------------------| s
* | (1) | (15-bit) | |
* | ALLOW | TX/RX | v
* |-----------------------------------+-----------------------------------|---
*
* With the following field descriptions:
*
* field | description
* ------------+-------------
* Realtek | 0x8899: indicates that this is a proprietary Realtek tag;
* EtherType | note that Realtek uses the same EtherType for
* | other incompatible tag formats (e.g. tag_rtl4_a.c)
* Protocol | 0x04: indicates that this tag conforms to this format
* X | reserved
* ------------+-------------
* REASON | reason for forwarding packet to CPU
* | 0: packet was forwarded or flooded to CPU
* | 80: packet was trapped to CPU
* FID_EN | 1: packet has an FID
* | 0: no FID
* FID | FID of packet (if FID_EN=1)
* PRI_EN | 1: force priority of packet
* | 0: don't force priority
* PRI | priority of packet (if PRI_EN=1)
* KEEP | preserve packet VLAN tag format
* LEARN_DIS | don't learn the source MAC address of the packet
* ALLOW | 1: treat TX/RX field as an allowance port mask, meaning the
* | packet may only be forwarded to ports specified in the
* | mask
* | 0: no allowance port mask, TX/RX field is the forwarding
* | port mask
* TX/RX | TX (switch->CPU): port number the packet was received on
* | RX (CPU->switch): forwarding port mask (if ALLOW=0)
* | allowance port mask (if ALLOW=1)
*
* The tag can be positioned before Ethertype, using tag "rtl8_4":
*
* +--------+--------+------------+------+-----
* | MAC DA | MAC SA | 8 byte tag | Type | ...
* +--------+--------+------------+------+-----
*
* The tag can also appear between the end of the payload and the CRC,
* using tag "rtl8_4t":
*
* +--------+--------+------+-----+---------+------------+-----+
* | MAC DA | MAC SA | TYPE | ... | payload | 8-byte tag | CRC |
* +--------+--------+------+-----+---------+------------+-----+
*
* The added bytes after the payload will break most checksums, either in
* software or hardware. To avoid this issue, if the checksum is still pending,
* this tagger checksums the packet in software before adding the tag.
*
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/etherdevice.h>
#include "tag.h"
/* Protocols supported:
*
* 0x04 = RTL8365MB DSA protocol
*/
#define RTL8_4_NAME "rtl8_4"
#define RTL8_4T_NAME "rtl8_4t"
#define RTL8_4_TAG_LEN 8
#define RTL8_4_PROTOCOL GENMASK(15, 8)
#define RTL8_4_PROTOCOL_RTL8365MB 0x04
#define RTL8_4_REASON GENMASK(7, 0)
#define RTL8_4_REASON_FORWARD 0
#define RTL8_4_REASON_TRAP 80
#define RTL8_4_LEARN_DIS BIT(5)
#define RTL8_4_TX GENMASK(3, 0)
#define RTL8_4_RX GENMASK(10, 0)
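/* For example, a CPU->switch frame directed at port 5 is built by
* rtl8_4_write_tag() below as four big-endian 16-bit words:
*
*	tag16[0] = htons(0x8899)	Realtek EtherType
*	tag16[1] = htons(0x0400)	Protocol 0x04, REASON zeroed
*	tag16[2] = htons(0x0020)	LEARN_DIS set (BIT(5))
*	tag16[3] = htons(0x0020)	ALLOW 0, RX port mask = BIT(5)
*
* On reception, rtl8_4_read_tag() recovers the source port with
* FIELD_GET(RTL8_4_TX, ntohs(tag16[3])), i.e. bits 3:0 of the last word.
*/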
static void rtl8_4_write_tag(struct sk_buff *skb, struct net_device *dev,
void *tag)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
__be16 tag16[RTL8_4_TAG_LEN / 2];
/* Set Realtek EtherType */
tag16[0] = htons(ETH_P_REALTEK);
/* Set Protocol; zero REASON */
tag16[1] = htons(FIELD_PREP(RTL8_4_PROTOCOL, RTL8_4_PROTOCOL_RTL8365MB));
/* Zero FID_EN, FID, PRI_EN, PRI, KEEP; set LEARN_DIS */
tag16[2] = htons(FIELD_PREP(RTL8_4_LEARN_DIS, 1));
/* Zero ALLOW; set RX (CPU->switch) forwarding port mask */
tag16[3] = htons(FIELD_PREP(RTL8_4_RX, BIT(dp->index)));
memcpy(tag, tag16, RTL8_4_TAG_LEN);
}
static struct sk_buff *rtl8_4_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
skb_push(skb, RTL8_4_TAG_LEN);
dsa_alloc_etype_header(skb, RTL8_4_TAG_LEN);
rtl8_4_write_tag(skb, dev, dsa_etype_header_pos_tx(skb));
return skb;
}
static struct sk_buff *rtl8_4t_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
/* Calculate the checksum here if it has not been done yet, as a trailing
* tag breaks both software- and hardware-based checksumming
*/
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
rtl8_4_write_tag(skb, dev, skb_put(skb, RTL8_4_TAG_LEN));
return skb;
}
static int rtl8_4_read_tag(struct sk_buff *skb, struct net_device *dev,
void *tag)
{
__be16 tag16[RTL8_4_TAG_LEN / 2];
u16 etype;
u8 reason;
u8 proto;
u8 port;
memcpy(tag16, tag, RTL8_4_TAG_LEN);
/* Parse Realtek EtherType */
etype = ntohs(tag16[0]);
if (unlikely(etype != ETH_P_REALTEK)) {
dev_warn_ratelimited(&dev->dev,
"non-realtek ethertype 0x%04x\n", etype);
return -EPROTO;
}
/* Parse Protocol */
proto = FIELD_GET(RTL8_4_PROTOCOL, ntohs(tag16[1]));
if (unlikely(proto != RTL8_4_PROTOCOL_RTL8365MB)) {
dev_warn_ratelimited(&dev->dev,
"unknown realtek protocol 0x%02x\n",
proto);
return -EPROTO;
}
/* Parse REASON */
reason = FIELD_GET(RTL8_4_REASON, ntohs(tag16[1]));
/* Parse TX (switch->CPU) */
port = FIELD_GET(RTL8_4_TX, ntohs(tag16[3]));
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev) {
dev_warn_ratelimited(&dev->dev,
"could not find slave for port %d\n",
port);
return -ENOENT;
}
if (reason != RTL8_4_REASON_TRAP)
dsa_default_offload_fwd_mark(skb);
return 0;
}
static struct sk_buff *rtl8_4_tag_rcv(struct sk_buff *skb,
struct net_device *dev)
{
if (unlikely(!pskb_may_pull(skb, RTL8_4_TAG_LEN)))
return NULL;
if (unlikely(rtl8_4_read_tag(skb, dev, dsa_etype_header_pos_rx(skb))))
return NULL;
/* Remove tag and recalculate checksum */
skb_pull_rcsum(skb, RTL8_4_TAG_LEN);
dsa_strip_etype_header(skb, RTL8_4_TAG_LEN);
return skb;
}
static struct sk_buff *rtl8_4t_tag_rcv(struct sk_buff *skb,
struct net_device *dev)
{
if (skb_linearize(skb))
return NULL;
if (unlikely(rtl8_4_read_tag(skb, dev, skb_tail_pointer(skb) - RTL8_4_TAG_LEN)))
return NULL;
if (pskb_trim_rcsum(skb, skb->len - RTL8_4_TAG_LEN))
return NULL;
return skb;
}
/* Ethertype version */
static const struct dsa_device_ops rtl8_4_netdev_ops = {
.name = "rtl8_4",
.proto = DSA_TAG_PROTO_RTL8_4,
.xmit = rtl8_4_tag_xmit,
.rcv = rtl8_4_tag_rcv,
.needed_headroom = RTL8_4_TAG_LEN,
};
DSA_TAG_DRIVER(rtl8_4_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL8_4, RTL8_4_NAME);
/* Tail version */
static const struct dsa_device_ops rtl8_4t_netdev_ops = {
.name = "rtl8_4t",
.proto = DSA_TAG_PROTO_RTL8_4T,
.xmit = rtl8_4t_tag_xmit,
.rcv = rtl8_4t_tag_rcv,
.needed_tailroom = RTL8_4_TAG_LEN,
};
DSA_TAG_DRIVER(rtl8_4t_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL8_4T, RTL8_4T_NAME);
static struct dsa_tag_driver *dsa_tag_drivers[] = {
&DSA_TAG_DRIVER_NAME(rtl8_4_netdev_ops),
&DSA_TAG_DRIVER_NAME(rtl8_4t_netdev_ops),
};
module_dsa_tag_drivers(dsa_tag_drivers);
MODULE_LICENSE("GPL");
| linux-master | net/dsa/tag_rtl8_4.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Mediatek DSA Tag support
* Copyright (C) 2017 Landen Chao <[email protected]>
* Sean Wang <[email protected]>
*/
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "tag.h"
#define MTK_NAME "mtk"
#define MTK_HDR_LEN 4
#define MTK_HDR_XMIT_UNTAGGED 0
#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
#define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
#define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0)
#define MTK_HDR_XMIT_SA_DIS BIT(6)
static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 xmit_tpid;
u8 *mtk_tag;
skb_set_queue_mapping(skb, dp->index);
/* Build the special tag after the MAC Source Address. If a VLAN header
* is present, the VLAN header and the special tag must be combined, so
* that the switch can parse both the special tag and the VLAN tag at the
* same time and look up the VLAN table with the VID.
*/
switch (skb->protocol) {
case htons(ETH_P_8021Q):
xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_8100;
break;
case htons(ETH_P_8021AD):
xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_88A8;
break;
default:
xmit_tpid = MTK_HDR_XMIT_UNTAGGED;
skb_push(skb, MTK_HDR_LEN);
dsa_alloc_etype_header(skb, MTK_HDR_LEN);
}
mtk_tag = dsa_etype_header_pos_tx(skb);
/* Mark the tag attribute on special tag insertion to tell the hardware
* whether this is a special tag combined with an 802.1Q header.
*/
mtk_tag[0] = xmit_tpid;
mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
/* Tag control information is kept for 802.1Q */
if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) {
mtk_tag[2] = 0;
mtk_tag[3] = 0;
}
return skb;
}
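/* For illustration: for an untagged frame sent out user port 1, the
* 4-byte header built above is
*
*	mtk_tag[] = { 0x00, 0x02, 0x00, 0x00 }
*
* i.e. MTK_HDR_XMIT_UNTAGGED, BIT(1) masked by MTK_HDR_XMIT_DP_BIT_MASK,
* and two zero bytes. For a frame that already carries an 802.1Q header,
* no bytes are pushed: byte 0 is rewritten to the TPID code (1 for 0x8100,
* 2 for 0x88a8), byte 1 carries the same port bit, and bytes 2-3 keep the
* original VLAN TCI.
*/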
static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev)
{
u16 hdr;
int port;
__be16 *phdr;
if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
return NULL;
phdr = dsa_etype_header_pos_rx(skb);
hdr = ntohs(*phdr);
/* Remove MTK tag and recalculate checksum. */
skb_pull_rcsum(skb, MTK_HDR_LEN);
dsa_strip_etype_header(skb, MTK_HDR_LEN);
/* Get source port information */
port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev)
return NULL;
dsa_default_offload_fwd_mark(skb);
return skb;
}
static const struct dsa_device_ops mtk_netdev_ops = {
.name = MTK_NAME,
.proto = DSA_TAG_PROTO_MTK,
.xmit = mtk_tag_xmit,
.rcv = mtk_tag_rcv,
.needed_headroom = MTK_HDR_LEN,
};
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_MTK, MTK_NAME);
module_dsa_tag_driver(mtk_netdev_ops);
| linux-master | net/dsa/tag_mtk.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* XRS700x tag format handling
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2020 NovaTech LLC
*/
#include <linux/bitops.h>
#include "tag.h"
#define XRS700X_NAME "xrs700x"
static struct sk_buff *xrs700x_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *partner, *dp = dsa_slave_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 1);
trailer[0] = BIT(dp->index);
if (dp->hsr_dev)
dsa_hsr_foreach_port(partner, dp->ds, dp->hsr_dev)
if (partner != dp)
trailer[0] |= BIT(partner->index);
return skb;
}
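/* For example, a frame egressing user port 2 gets the trailer byte 0x04
* (BIT(2)). If that port is part of an HSR ring together with port 3, the
* partner bit is OR-ed in and the trailer becomes 0x0c, so the frame is
* sent out both ring ports. On receive, xrs700x_rcv() maps 0x04 back to
* source port ffs(0x04) - 1 = 2.
*/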
static struct sk_buff *xrs700x_rcv(struct sk_buff *skb, struct net_device *dev)
{
int source_port;
u8 *trailer;
trailer = skb_tail_pointer(skb) - 1;
source_port = ffs((int)trailer[0]) - 1;
if (source_port < 0)
return NULL;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
if (!skb->dev)
return NULL;
if (pskb_trim_rcsum(skb, skb->len - 1))
return NULL;
/* Frame is forwarded by hardware, don't forward in software. */
dsa_default_offload_fwd_mark(skb);
return skb;
}
static const struct dsa_device_ops xrs700x_netdev_ops = {
.name = XRS700X_NAME,
.proto = DSA_TAG_PROTO_XRS700X,
.xmit = xrs700x_xmit,
.rcv = xrs700x_rcv,
.needed_tailroom = 1,
};
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_XRS700X, XRS700X_NAME);
module_dsa_tag_driver(xrs700x_netdev_ops);
| linux-master | net/dsa/tag_xrs700x.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <[email protected]>
*
* This module is not a complete tagger implementation. It only provides
* primitives for taggers that rely on 802.1Q VLAN tags to use.
*/
#include <linux/if_vlan.h>
#include <linux/dsa/8021q.h>
#include "port.h"
#include "switch.h"
#include "tag.h"
#include "tag_8021q.h"
/* Binary structure of the fake 12-bit VID field (when the TPID is
* ETH_P_DSA_8021Q):
*
* | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* +-----------+-----+-----------------+-----------+-----------------------+
* | RSV | VBID| SWITCH_ID | VBID | PORT |
* +-----------+-----+-----------------+-----------+-----------------------+
*
* RSV - VID[11:10]:
* Reserved. Must be set to 3 (0b11).
*
* SWITCH_ID - VID[8:6]:
* Index of switch within DSA tree. Must be between 0 and 7.
*
* VBID - { VID[9], VID[5:4] }:
* Virtual bridge ID. If between 1 and 7, packet targets the broadcast
* domain of a bridge. If transmitted as zero, packet targets a single
* port.
*
* PORT - VID[3:0]:
* Index of switch port. Must be between 0 and 15.
*/
#define DSA_8021Q_RSV_VAL 3
#define DSA_8021Q_RSV_SHIFT 10
#define DSA_8021Q_RSV_MASK GENMASK(11, 10)
#define DSA_8021Q_RSV ((DSA_8021Q_RSV_VAL << DSA_8021Q_RSV_SHIFT) & \
DSA_8021Q_RSV_MASK)
#define DSA_8021Q_SWITCH_ID_SHIFT 6
#define DSA_8021Q_SWITCH_ID_MASK GENMASK(8, 6)
#define DSA_8021Q_SWITCH_ID(x) (((x) << DSA_8021Q_SWITCH_ID_SHIFT) & \
DSA_8021Q_SWITCH_ID_MASK)
#define DSA_8021Q_VBID_HI_SHIFT 9
#define DSA_8021Q_VBID_HI_MASK GENMASK(9, 9)
#define DSA_8021Q_VBID_LO_SHIFT 4
#define DSA_8021Q_VBID_LO_MASK GENMASK(5, 4)
#define DSA_8021Q_VBID_HI(x) (((x) & GENMASK(2, 2)) >> 2)
#define DSA_8021Q_VBID_LO(x) ((x) & GENMASK(1, 0))
#define DSA_8021Q_VBID(x) \
(((DSA_8021Q_VBID_LO(x) << DSA_8021Q_VBID_LO_SHIFT) & \
DSA_8021Q_VBID_LO_MASK) | \
((DSA_8021Q_VBID_HI(x) << DSA_8021Q_VBID_HI_SHIFT) & \
DSA_8021Q_VBID_HI_MASK))
#define DSA_8021Q_PORT_SHIFT 0
#define DSA_8021Q_PORT_MASK GENMASK(3, 0)
#define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \
DSA_8021Q_PORT_MASK)
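/* Worked example: the standalone VID for port 3 of switch 1 is
*
*	DSA_8021Q_RSV | DSA_8021Q_SWITCH_ID(1) | DSA_8021Q_PORT(3)
*	= 0xc00 | 0x040 | 0x003 = 0xc43
*
* while the bridge VID for bridge_num 5 (binary 101) splits the VBID as
* VBID_HI = 1 (VID bit 9) and VBID_LO = 1 (VID bits 5:4):
*
*	DSA_8021Q_RSV | DSA_8021Q_VBID(5) = 0xc00 | 0x210 = 0xe10
*/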
struct dsa_tag_8021q_vlan {
struct list_head list;
int port;
u16 vid;
refcount_t refcount;
};
struct dsa_8021q_context {
struct dsa_switch *ds;
struct list_head vlans;
/* EtherType of RX VID, used for filtering on master interface */
__be16 proto;
};
u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num)
{
/* The VBID value of 0 is reserved for precise TX, but it is also
* reserved/invalid for the bridge_num, so all is well.
*/
return DSA_8021Q_RSV | DSA_8021Q_VBID(bridge_num);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_vid);
/* Returns the VID that will be installed as pvid for this switch port, sent as
* tagged egress towards the CPU port and decoded by the rcv function.
*/
u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp)
{
return DSA_8021Q_RSV | DSA_8021Q_SWITCH_ID(dp->ds->index) |
DSA_8021Q_PORT(dp->index);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_standalone_vid);
/* Returns the decoded switch ID from the RX VID. */
int dsa_8021q_rx_switch_id(u16 vid)
{
return (vid & DSA_8021Q_SWITCH_ID_MASK) >> DSA_8021Q_SWITCH_ID_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_switch_id);
/* Returns the decoded port ID from the RX VID. */
int dsa_8021q_rx_source_port(u16 vid)
{
return (vid & DSA_8021Q_PORT_MASK) >> DSA_8021Q_PORT_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_source_port);
/* Returns the decoded VBID from the RX VID. */
static int dsa_tag_8021q_rx_vbid(u16 vid)
{
u16 vbid_hi = (vid & DSA_8021Q_VBID_HI_MASK) >> DSA_8021Q_VBID_HI_SHIFT;
u16 vbid_lo = (vid & DSA_8021Q_VBID_LO_MASK) >> DSA_8021Q_VBID_LO_SHIFT;
return (vbid_hi << 2) | vbid_lo;
}
bool vid_is_dsa_8021q(u16 vid)
{
u16 rsv = (vid & DSA_8021Q_RSV_MASK) >> DSA_8021Q_RSV_SHIFT;
return rsv == DSA_8021Q_RSV_VAL;
}
EXPORT_SYMBOL_GPL(vid_is_dsa_8021q);
static struct dsa_tag_8021q_vlan *
dsa_tag_8021q_vlan_find(struct dsa_8021q_context *ctx, int port, u16 vid)
{
struct dsa_tag_8021q_vlan *v;
list_for_each_entry(v, &ctx->vlans, list)
if (v->vid == vid && v->port == port)
return v;
return NULL;
}
static int dsa_port_do_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid,
u16 flags)
{
struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx;
struct dsa_switch *ds = dp->ds;
struct dsa_tag_8021q_vlan *v;
int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
return ds->ops->tag_8021q_vlan_add(ds, port, vid, flags);
v = dsa_tag_8021q_vlan_find(ctx, port, vid);
if (v) {
refcount_inc(&v->refcount);
return 0;
}
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;
err = ds->ops->tag_8021q_vlan_add(ds, port, vid, flags);
if (err) {
kfree(v);
return err;
}
v->vid = vid;
v->port = port;
refcount_set(&v->refcount, 1);
list_add_tail(&v->list, &ctx->vlans);
return 0;
}
static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
{
struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx;
struct dsa_switch *ds = dp->ds;
struct dsa_tag_8021q_vlan *v;
int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
return ds->ops->tag_8021q_vlan_del(ds, port, vid);
v = dsa_tag_8021q_vlan_find(ctx, port, vid);
if (!v)
return -ENOENT;
if (!refcount_dec_and_test(&v->refcount))
return 0;
err = ds->ops->tag_8021q_vlan_del(ds, port, vid);
if (err) {
refcount_inc(&v->refcount);
return err;
}
list_del(&v->list);
kfree(v);
return 0;
}
static bool
dsa_port_tag_8021q_vlan_match(struct dsa_port *dp,
struct dsa_notifier_tag_8021q_vlan_info *info)
{
return dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp) || dp == info->dp;
}
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info)
{
struct dsa_port *dp;
int err;
/* Since we use dsa_broadcast(), there might be other switches in other
* trees which don't support tag_8021q, so don't return an error.
* Or they might even support tag_8021q but have not registered yet to
* use it (maybe they use another tagger currently).
*/
if (!ds->ops->tag_8021q_vlan_add || !ds->tag_8021q_ctx)
return 0;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_tag_8021q_vlan_match(dp, info)) {
u16 flags = 0;
if (dsa_port_is_user(dp))
flags |= BRIDGE_VLAN_INFO_UNTAGGED |
BRIDGE_VLAN_INFO_PVID;
err = dsa_port_do_tag_8021q_vlan_add(dp, info->vid,
flags);
if (err)
return err;
}
}
return 0;
}
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info)
{
struct dsa_port *dp;
int err;
if (!ds->ops->tag_8021q_vlan_del || !ds->tag_8021q_ctx)
return 0;
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_tag_8021q_vlan_match(dp, info)) {
err = dsa_port_do_tag_8021q_vlan_del(dp, info->vid);
if (err)
return err;
}
}
return 0;
}
/* There are 2 ways of offloading tag_8021q VLANs.
*
* One is to use a hardware TCAM to push the port's standalone VLAN into the
* frame when forwarding it to the CPU, as an egress modification rule on the
* CPU port. This is preferable because it has no side effects for the
* autonomous forwarding path, and accomplishes tag_8021q's primary goal of
* identifying the source port of each packet based on VLAN ID.
*
* The other is to commit the tag_8021q VLAN as a PVID to the VLAN table, and
* to configure the port as VLAN-unaware. This is less preferable because
* unique source port identification can only be done for standalone ports;
* under a VLAN-unaware bridge, all ports share the same tag_8021q VLAN as
* PVID, and under a VLAN-aware bridge, packets received by software will not
* have tag_8021q VLANs appended, just bridge VLANs.
*
* For tag_8021q implementations of the second type, this method is used to
* replace the standalone tag_8021q VLAN of a port with the tag_8021q VLAN to
* be used for VLAN-unaware bridging.
*/
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct dsa_port *dp = dsa_to_port(ds, port);
u16 standalone_vid, bridge_vid;
int err;
/* Delete the standalone VLAN of the port and replace it with a
* bridging VLAN
*/
standalone_vid = dsa_tag_8021q_standalone_vid(dp);
bridge_vid = dsa_tag_8021q_bridge_vid(bridge.num);
err = dsa_port_tag_8021q_vlan_add(dp, bridge_vid, true);
if (err)
return err;
dsa_port_tag_8021q_vlan_del(dp, standalone_vid, false);
return 0;
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_join);
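/* For example, when port 3 of switch 0 joins a bridge with bridge_num 1,
* the bridging VID 0xc10 (RSV | VBID(1)) is installed as the new pvid and
* the standalone VID 0xc03 is removed; dsa_tag_8021q_bridge_leave() below
* performs the reverse replacement.
*/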
void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct dsa_port *dp = dsa_to_port(ds, port);
u16 standalone_vid, bridge_vid;
int err;
/* Delete the bridging VLAN of the port and replace it with a
* standalone VLAN
*/
standalone_vid = dsa_tag_8021q_standalone_vid(dp);
bridge_vid = dsa_tag_8021q_bridge_vid(bridge.num);
err = dsa_port_tag_8021q_vlan_add(dp, standalone_vid, false);
if (err) {
dev_err(ds->dev,
"Failed to delete tag_8021q standalone VLAN %d from port %d: %pe\n",
standalone_vid, port, ERR_PTR(err));
}
dsa_port_tag_8021q_vlan_del(dp, bridge_vid, true);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_leave);
/* Set up a port's standalone tag_8021q VLAN */
static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
{
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
u16 vid = dsa_tag_8021q_standalone_vid(dp);
struct net_device *master;
int err;
/* The CPU port is implicitly configured by
* configuring the front-panel ports
*/
if (!dsa_port_is_user(dp))
return 0;
master = dsa_port_to_master(dp);
err = dsa_port_tag_8021q_vlan_add(dp, vid, false);
if (err) {
dev_err(ds->dev,
"Failed to apply standalone VID %d to port %d: %pe\n",
vid, port, ERR_PTR(err));
return err;
}
/* Add the VLAN to the master's RX filter. */
vlan_vid_add(master, ctx->proto, vid);
return err;
}
static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
{
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
u16 vid = dsa_tag_8021q_standalone_vid(dp);
struct net_device *master;
/* The CPU port is implicitly configured by
* configuring the front-panel ports
*/
if (!dsa_port_is_user(dp))
return;
master = dsa_port_to_master(dp);
dsa_port_tag_8021q_vlan_del(dp, vid, false);
vlan_vid_del(master, ctx->proto, vid);
}
static int dsa_tag_8021q_setup(struct dsa_switch *ds)
{
int err, port;
ASSERT_RTNL();
for (port = 0; port < ds->num_ports; port++) {
err = dsa_tag_8021q_port_setup(ds, port);
if (err < 0) {
dev_err(ds->dev,
"Failed to setup VLAN tagging for port %d: %pe\n",
port, ERR_PTR(err));
return err;
}
}
return 0;
}
static void dsa_tag_8021q_teardown(struct dsa_switch *ds)
{
int port;
ASSERT_RTNL();
for (port = 0; port < ds->num_ports; port++)
dsa_tag_8021q_port_teardown(ds, port);
}
int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
{
struct dsa_8021q_context *ctx;
int err;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->proto = proto;
ctx->ds = ds;
INIT_LIST_HEAD(&ctx->vlans);
ds->tag_8021q_ctx = ctx;
err = dsa_tag_8021q_setup(ds);
if (err)
goto err_free;
return 0;
err_free:
kfree(ctx);
return err;
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_register);
void dsa_tag_8021q_unregister(struct dsa_switch *ds)
{
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_tag_8021q_vlan *v, *n;
dsa_tag_8021q_teardown(ds);
list_for_each_entry_safe(v, n, &ctx->vlans, list) {
list_del(&v->list);
kfree(v);
}
ds->tag_8021q_ctx = NULL;
kfree(ctx);
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_unregister);
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci)
{
/* skb->data points at the MAC header, which is fine
* for vlan_insert_tag().
*/
return vlan_insert_tag(skb, htons(tpid), tci);
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
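/* A minimal usage sketch, not taken verbatim from any tagger (though the
* sja1105 family tagger works along these lines): the caller derives the
* TCI from a tag_8021q VID plus a priority, e.g.
*
*	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
*	u16 tci = (pcp << VLAN_PRIO_SHIFT) | tx_vid;
*
*	return dsa_8021q_xmit(skb, netdev, ETH_P_DSA_8021Q, tci);
*
* where "dp" and "pcp" are assumed local variables of the caller.
*/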
struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
int vbid)
{
struct dsa_port *cpu_dp = master->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *dp;
if (WARN_ON(!vbid))
return NULL;
dsa_tree_for_each_user_port(dp, dst) {
if (!dp->bridge)
continue;
if (dp->stp_state != BR_STATE_LEARNING &&
dp->stp_state != BR_STATE_FORWARDING)
continue;
if (dp->cpu_dp != cpu_dp)
continue;
if (dsa_port_bridge_num_get(dp) == vbid)
return dp->slave;
}
return NULL;
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_port_by_vbid);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
int *vbid)
{
u16 vid, tci;
if (skb_vlan_tag_present(skb)) {
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &tci);
skb_pull_rcsum(skb, ETH_HLEN);
}
vid = tci & VLAN_VID_MASK;
*source_port = dsa_8021q_rx_source_port(vid);
*switch_id = dsa_8021q_rx_switch_id(vid);
if (vbid)
*vbid = dsa_tag_8021q_rx_vbid(vid);
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
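/* For example, a TCI of 0x6c43 decodes to priority 3 (bits 15:13) and VID
* 0xc43, which in turn yields source port 3, switch ID 1 and VBID 0, i.e.
* the frame arrived with a per-port standalone VID rather than a bridge
* VID.
*/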
| linux-master | net/dsa/tag_8021q.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/dsa/tag_none.c - Traffic handling for switches with no tag
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2013 Florian Fainelli <[email protected]>
*
* WARNING: do not use this for new switches. In case of no hardware
* tagging support, look at tag_8021q.c instead.
*/
#include "tag.h"
#define NONE_NAME "none"
static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
/* Just return the original SKB */
return skb;
}
static const struct dsa_device_ops none_ops = {
.name = NONE_NAME,
.proto = DSA_TAG_PROTO_NONE,
.xmit = dsa_slave_notag_xmit,
};
module_dsa_tag_driver(none_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_NONE, NONE_NAME);
MODULE_LICENSE("GPL");
| linux-master | net/dsa/tag_none.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DSA tagging protocol handling
*
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2013 Florian Fainelli <[email protected]>
* Copyright (c) 2016 Andrew Lunn <[email protected]>
*/
#include <linux/netdevice.h>
#include <linux/ptp_classify.h>
#include <linux/skbuff.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include "slave.h"
#include "tag.h"
static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);
/* Determine if we should defer delivery of skb until we have a rx timestamp.
*
* Called from dsa_switch_rcv. For now, this will only work if tagging is
* enabled on the switch. Normally the MAC driver would retrieve the hardware
* timestamp when it reads the packet out of the hardware. However in a DSA
* switch, the DSA driver owning the interface to which the packet is
* delivered is never notified unless we do so here.
*/
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
struct sk_buff *skb)
{
struct dsa_switch *ds = p->dp->ds;
unsigned int type;
if (!ds->ops->port_rxtstamp)
return false;
if (skb_headroom(skb) < ETH_HLEN)
return false;
__skb_push(skb, ETH_HLEN);
type = ptp_classify_raw(skb);
__skb_pull(skb, ETH_HLEN);
if (type == PTP_CLASS_NONE)
return false;
return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);
}
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *unused)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
struct dsa_slave_priv *p;
if (unlikely(!cpu_dp)) {
kfree_skb(skb);
return 0;
}
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb)
return 0;
if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
unsigned int port = md_dst->u.port_info.port_id;
skb_dst_drop(skb);
if (!skb_has_extensions(skb))
skb->slow_gro = 0;
skb->dev = dsa_master_find_slave(dev, 0, port);
if (likely(skb->dev)) {
dsa_default_offload_fwd_mark(skb);
nskb = skb;
}
} else {
nskb = cpu_dp->rcv(skb, dev);
}
if (!nskb) {
kfree_skb(skb);
return 0;
}
skb = nskb;
skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
if (unlikely(!dsa_slave_dev_check(skb->dev))) {
/* Packet is to be injected directly on an upper
* device, e.g. a team/bond, so skip all DSA-port
* specific actions.
*/
netif_rx(skb);
return 0;
}
p = netdev_priv(skb->dev);
if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
nskb = dsa_untag_bridge_pvid(skb);
if (!nskb) {
kfree_skb(skb);
return 0;
}
skb = nskb;
}
dev_sw_netstats_rx_add(skb->dev, skb->len + ETH_HLEN);
if (dsa_skb_defer_rx_timestamp(p, skb))
return 0;
gro_cells_receive(&p->gcells, skb);
return 0;
}
struct packet_type dsa_pack_type __read_mostly = {
.type = cpu_to_be16(ETH_P_XDSA),
.func = dsa_switch_rcv,
};
static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
struct module *owner)
{
dsa_tag_driver->owner = owner;
mutex_lock(&dsa_tag_drivers_lock);
list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
mutex_unlock(&dsa_tag_drivers_lock);
}
void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count, struct module *owner)
{
unsigned int i;
for (i = 0; i < count; i++)
dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
mutex_lock(&dsa_tag_drivers_lock);
list_del(&dsa_tag_driver->list);
mutex_unlock(&dsa_tag_drivers_lock);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++)
dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
return ops->name;
}
/* Function takes a reference on the module owning the tagger,
* so dsa_tag_driver_put must be called afterwards.
*/
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name)
{
const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
struct dsa_tag_driver *dsa_tag_driver;
request_module("%s%s", DSA_TAG_DRIVER_ALIAS, name);
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
const struct dsa_device_ops *tmp = dsa_tag_driver->ops;
if (strcmp(name, tmp->name))
continue;
if (!try_module_get(dsa_tag_driver->owner))
break;
ops = tmp;
break;
}
mutex_unlock(&dsa_tag_drivers_lock);
return ops;
}
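/* Typical pairing, sketched for illustration (error handling trimmed, the
* tagger name is arbitrary):
*
*	ops = dsa_tag_driver_get_by_name("ocelot");
*	if (IS_ERR(ops))
*		return PTR_ERR(ops);
*	...
*	dsa_tag_driver_put(ops);
*/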
const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol)
{
struct dsa_tag_driver *dsa_tag_driver;
const struct dsa_device_ops *ops;
bool found = false;
request_module("%sid-%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
ops = dsa_tag_driver->ops;
if (ops->proto == tag_protocol) {
found = true;
break;
}
}
if (found) {
if (!try_module_get(dsa_tag_driver->owner))
ops = ERR_PTR(-ENOPROTOOPT);
} else {
ops = ERR_PTR(-ENOPROTOOPT);
}
mutex_unlock(&dsa_tag_drivers_lock);
return ops;
}
void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
struct dsa_tag_driver *dsa_tag_driver;
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
if (dsa_tag_driver->ops == ops) {
module_put(dsa_tag_driver->owner);
break;
}
}
mutex_unlock(&dsa_tag_drivers_lock);
}
| linux-master | net/dsa/tag.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/dsa/slave.c - Slave device handling
* Copyright (c) 2008-2009 Marvell Semiconductor
*/
#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>
#include <linux/string.h>
#include "dsa.h"
#include "port.h"
#include "master.h"
#include "netlink.h"
#include "slave.h"
#include "switch.h"
#include "tag.h"
struct dsa_switchdev_event_work {
struct net_device *dev;
struct net_device *orig_dev;
struct work_struct work;
unsigned long event;
/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
* SWITCHDEV_FDB_DEL_TO_DEVICE
*/
unsigned char addr[ETH_ALEN];
u16 vid;
bool host_addr;
};
enum dsa_standalone_event {
DSA_UC_ADD,
DSA_UC_DEL,
DSA_MC_ADD,
DSA_MC_DEL,
};
struct dsa_standalone_event_work {
struct work_struct work;
struct net_device *dev;
enum dsa_standalone_event event;
unsigned char addr[ETH_ALEN];
u16 vid;
};
struct dsa_host_vlan_rx_filtering_ctx {
struct net_device *dev;
const unsigned char *addr;
enum dsa_standalone_event event;
};
static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
ds->fdb_isolation && !ds->vlan_filtering_is_global &&
!ds->needs_standalone_vlan_filtering;
}
static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
{
return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
ds->fdb_isolation && !ds->vlan_filtering_is_global &&
!ds->needs_standalone_vlan_filtering;
}
static void dsa_slave_standalone_event_work(struct work_struct *work)
{
struct dsa_standalone_event_work *standalone_work =
container_of(work, struct dsa_standalone_event_work, work);
const unsigned char *addr = standalone_work->addr;
struct net_device *dev = standalone_work->dev;
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_mdb mdb;
struct dsa_switch *ds = dp->ds;
u16 vid = standalone_work->vid;
int err;
switch (standalone_work->event) {
case DSA_UC_ADD:
err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
if (err) {
dev_err(ds->dev,
"port %d failed to add %pM vid %d to fdb: %d\n",
dp->index, addr, vid, err);
break;
}
break;
case DSA_UC_DEL:
err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
if (err) {
dev_err(ds->dev,
"port %d failed to delete %pM vid %d from fdb: %d\n",
dp->index, addr, vid, err);
}
break;
case DSA_MC_ADD:
ether_addr_copy(mdb.addr, addr);
mdb.vid = vid;
err = dsa_port_standalone_host_mdb_add(dp, &mdb);
if (err) {
dev_err(ds->dev,
"port %d failed to add %pM vid %d to mdb: %d\n",
dp->index, addr, vid, err);
break;
}
break;
case DSA_MC_DEL:
ether_addr_copy(mdb.addr, addr);
mdb.vid = vid;
err = dsa_port_standalone_host_mdb_del(dp, &mdb);
if (err) {
dev_err(ds->dev,
"port %d failed to delete %pM vid %d from mdb: %d\n",
dp->index, addr, vid, err);
}
break;
}
kfree(standalone_work);
}
static int dsa_slave_schedule_standalone_work(struct net_device *dev,
enum dsa_standalone_event event,
const unsigned char *addr,
u16 vid)
{
struct dsa_standalone_event_work *standalone_work;
standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
if (!standalone_work)
return -ENOMEM;
INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
standalone_work->event = event;
standalone_work->dev = dev;
ether_addr_copy(standalone_work->addr, addr);
standalone_work->vid = vid;
dsa_schedule_work(&standalone_work->work);
return 0;
}
static int dsa_slave_host_vlan_rx_filtering(void *arg, int vid)
{
struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
return dsa_slave_schedule_standalone_work(ctx->dev, ctx->event,
ctx->addr, vid);
}
static int dsa_slave_vlan_for_each(struct net_device *dev,
int (*cb)(void *arg, int vid), void *arg)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_vlan *v;
int err;
lockdep_assert_held(&dev->addr_list_lock);
err = cb(arg, 0);
if (err)
return err;
list_for_each_entry(v, &dp->user_vlans, list) {
err = cb(arg, v->vid);
if (err)
return err;
}
return 0;
}
static int dsa_slave_sync_uc(struct net_device *dev,
const unsigned char *addr)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_host_vlan_rx_filtering_ctx ctx = {
.dev = dev,
.addr = addr,
.event = DSA_UC_ADD,
};
dev_uc_add(master, addr);
if (!dsa_switch_supports_uc_filtering(dp->ds))
return 0;
return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
&ctx);
}
static int dsa_slave_unsync_uc(struct net_device *dev,
const unsigned char *addr)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_host_vlan_rx_filtering_ctx ctx = {
.dev = dev,
.addr = addr,
.event = DSA_UC_DEL,
};
dev_uc_del(master, addr);
if (!dsa_switch_supports_uc_filtering(dp->ds))
return 0;
return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
&ctx);
}
static int dsa_slave_sync_mc(struct net_device *dev,
const unsigned char *addr)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_host_vlan_rx_filtering_ctx ctx = {
.dev = dev,
.addr = addr,
.event = DSA_MC_ADD,
};
dev_mc_add(master, addr);
if (!dsa_switch_supports_mc_filtering(dp->ds))
return 0;
return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
&ctx);
}
static int dsa_slave_unsync_mc(struct net_device *dev,
const unsigned char *addr)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_host_vlan_rx_filtering_ctx ctx = {
.dev = dev,
.addr = addr,
.event = DSA_MC_DEL,
};
dev_mc_del(master, addr);
if (!dsa_switch_supports_mc_filtering(dp->ds))
return 0;
return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
&ctx);
}
void dsa_slave_sync_ha(struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(dev);
netdev_for_each_synced_mc_addr(ha, dev)
dsa_slave_sync_mc(dev, ha->addr);
netdev_for_each_synced_uc_addr(ha, dev)
dsa_slave_sync_uc(dev, ha->addr);
netif_addr_unlock_bh(dev);
if (dsa_switch_supports_uc_filtering(ds) ||
dsa_switch_supports_mc_filtering(ds))
dsa_flush_workqueue();
}
void dsa_slave_unsync_ha(struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(dev);
netdev_for_each_synced_uc_addr(ha, dev)
dsa_slave_unsync_uc(dev, ha->addr);
netdev_for_each_synced_mc_addr(ha, dev)
dsa_slave_unsync_mc(dev, ha->addr);
netif_addr_unlock_bh(dev);
if (dsa_switch_supports_uc_filtering(ds) ||
dsa_switch_supports_mc_filtering(ds))
dsa_flush_workqueue();
}
/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
struct dsa_switch *ds = bus->priv;
if (ds->phys_mii_mask & (1 << addr))
return ds->ops->phy_read(ds, addr, reg);
return 0xffff;
}
static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
struct dsa_switch *ds = bus->priv;
if (ds->phys_mii_mask & (1 << addr))
return ds->ops->phy_write(ds, addr, reg, val);
return 0;
}
void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
ds->slave_mii_bus->priv = (void *)ds;
ds->slave_mii_bus->name = "dsa slave smi";
ds->slave_mii_bus->read = dsa_slave_phy_read;
ds->slave_mii_bus->write = dsa_slave_phy_write;
snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
ds->dst->index, ds->index);
ds->slave_mii_bus->parent = ds->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
return dsa_slave_to_master(dev)->ifindex;
}
static int dsa_slave_open(struct net_device *dev)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int err;
err = dev_open(master, NULL);
if (err < 0) {
netdev_err(dev, "failed to open master %s\n", master->name);
goto out;
}
if (dsa_switch_supports_uc_filtering(ds)) {
err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
if (err)
goto out;
}
if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
err = dev_uc_add(master, dev->dev_addr);
if (err < 0)
goto del_host_addr;
}
err = dsa_port_enable_rt(dp, dev->phydev);
if (err)
goto del_unicast;
return 0;
del_unicast:
if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
del_host_addr:
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
return err;
}
static int dsa_slave_close(struct net_device *dev)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
dsa_port_disable_rt(dp);
if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
return 0;
}
static void dsa_slave_manage_host_flood(struct net_device *dev)
{
bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
struct dsa_port *dp = dsa_slave_to_port(dev);
bool uc = dev->flags & IFF_PROMISC;
dsa_port_set_host_flood(dp, uc, mc);
}
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (change & IFF_ALLMULTI)
dev_set_allmulti(master,
dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
dev_set_promiscuity(master,
dev->flags & IFF_PROMISC ? 1 : -1);
if (dsa_switch_supports_uc_filtering(ds) &&
dsa_switch_supports_mc_filtering(ds))
dsa_slave_manage_host_flood(dev);
}
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct sockaddr *addr = a;
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
/* If the port is down, the address isn't synced yet to hardware or
* to the DSA master, so there is nothing to change.
*/
if (!(dev->flags & IFF_UP))
goto out_change_dev_addr;
if (dsa_switch_supports_uc_filtering(ds)) {
err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
if (err)
return err;
}
if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
err = dev_uc_add(master, addr->sa_data);
if (err < 0)
goto del_unicast;
}
if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out_change_dev_addr:
eth_hw_addr_set(dev, addr->sa_data);
return 0;
del_unicast:
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);
return err;
}
struct dsa_slave_dump_ctx {
struct net_device *dev;
struct sk_buff *skb;
struct netlink_callback *cb;
int idx;
};
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct dsa_slave_dump_ctx *dump = data;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
if (dump->idx < dump->cb->args[2])
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
sizeof(*ndm), NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
ndm = nlmsg_data(nlh);
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
ndm->ndm_flags = NTF_SELF;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dump->dev->ifindex;
ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
goto nla_put_failure;
if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
goto nla_put_failure;
nlmsg_end(dump->skb, nlh);
skip:
dump->idx++;
return 0;
nla_put_failure:
nlmsg_cancel(dump->skb, nlh);
return -EMSGSIZE;
}
static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, struct net_device *filter_dev,
int *idx)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_slave_dump_ctx dump = {
.dev = dev,
.skb = skb,
.cb = cb,
.idx = *idx,
};
int err;
err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
*idx = dump.idx;
return err;
}
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
int port = p->dp->index;
/* Pass through to switch driver if it supports timestamping */
switch (cmd) {
case SIOCGHWTSTAMP:
if (ds->ops->port_hwtstamp_get)
return ds->ops->port_hwtstamp_get(ds, port, ifr);
break;
case SIOCSHWTSTAMP:
if (ds->ops->port_hwtstamp_set)
return ds->ops->port_hwtstamp_set(ds, port, ifr);
break;
}
return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
int ret;
if (ctx && ctx != dp)
return 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_set_state(dp, attr->u.stp_state, true);
break;
case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
extack);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MST:
if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
extack);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
break;
case SWITCHDEV_ATTR_ID_VLAN_MSTI:
if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
const struct switchdev_obj_port_vlan *vlan)
{
struct net_device *upper_dev;
struct list_head *iter;
netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
u16 vid;
if (!is_vlan_dev(upper_dev))
continue;
vid = vlan_dev_vlan_id(upper_dev);
if (vid == vlan->vid)
return -EBUSY;
}
return 0;
}
static int dsa_slave_vlan_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
int err;
if (dsa_port_skip_vlan_configuration(dp)) {
NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
return 0;
}
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
* the same VID.
*/
if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
rcu_read_lock();
err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
rcu_read_unlock();
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Port already has a VLAN upper with this VID");
return err;
}
}
return dsa_port_vlan_add(dp, vlan, extack);
}
/* Offload a VLAN installed on the bridge or on a foreign interface by
* installing it as a VLAN towards the CPU port.
*/
static int dsa_slave_host_vlan_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan;
/* Do nothing if this is a software bridge */
if (!dp->bridge)
return -EOPNOTSUPP;
if (dsa_port_skip_vlan_configuration(dp)) {
NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
return 0;
}
vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
/* Even though drivers often handle CPU membership in special ways,
* it doesn't make sense to program a PVID, so clear this flag.
*/
vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
return dsa_port_host_vlan_add(dp, &vlan, extack);
}
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
int err;
if (ctx && ctx != dp)
return 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_MDB:
if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_HOST_MDB:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
err = dsa_slave_vlan_add(dev, obj, extack);
else
err = dsa_slave_host_vlan_add(dev, obj, extack);
break;
case SWITCHDEV_OBJ_ID_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
break;
case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_add_ring_role(dp,
SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static int dsa_slave_vlan_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
if (dsa_port_skip_vlan_configuration(dp))
return 0;
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
return dsa_port_vlan_del(dp, vlan);
}
static int dsa_slave_host_vlan_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
/* Do nothing if this is a software bridge */
if (!dp->bridge)
return -EOPNOTSUPP;
if (dsa_port_skip_vlan_configuration(dp))
return 0;
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
return dsa_port_host_vlan_del(dp, vlan);
}
static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
int err;
if (ctx && ctx != dp)
return 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_MDB:
if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_HOST_MDB:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
err = dsa_slave_vlan_del(dev, obj);
else
err = dsa_slave_host_vlan_del(dev, obj);
break;
case SWITCHDEV_OBJ_ID_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
break;
case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_del_ring_role(dp,
SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
struct dsa_slave_priv *p = netdev_priv(dev);
return netpoll_send_skb(p->netpoll, skb);
#else
BUG();
return NETDEV_TX_OK;
#endif
}
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
struct sk_buff *skb)
{
struct dsa_switch *ds = p->dp->ds;
if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
return;
if (!ds->ops->port_txtstamp)
return;
ds->ops->port_txtstamp(ds, p->dp->index, skb);
}
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
/* SKBs for netpoll still need to be mangled with the protocol-specific
* tag to be successfully transmitted
*/
if (unlikely(netpoll_tx_running(dev)))
return dsa_slave_netpoll_send_skb(dev, skb);
/* Queue the SKB for transmission on the parent interface, but
* do not modify its EtherType
*/
skb->dev = dsa_slave_to_master(dev);
dev_queue_xmit(skb);
return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
int needed_headroom = dev->needed_headroom;
int needed_tailroom = dev->needed_tailroom;
/* For tail taggers, we need to pad short frames ourselves, to ensure
* that the tail tag does not fail at its role of being at the end of
* the packet, once the master interface pads the frame. Account for
* that pad length here, and pad later.
*/
if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
needed_tailroom += ETH_ZLEN - skb->len;
/* skb_headroom() returns unsigned int... */
needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
/* No reallocation needed, yay! */
return 0;
return pskb_expand_head(skb, needed_headroom, needed_tailroom,
GFP_ATOMIC);
}
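/* For example, with a 1-byte tail tag (dev->needed_tailroom == 1) and a
* 40-byte frame, needed_tailroom grows to 1 + (ETH_ZLEN - 40) = 21, so
* that dsa_slave_xmit() can pad the frame to 60 bytes before the tagger
* appends its trailer, keeping the tag at the very end of the packet.
*/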
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct sk_buff *nskb;
dev_sw_netstats_tx_add(dev, 1, skb->len);
memset(skb->cb, 0, sizeof(skb->cb));
/* Handle tx timestamp if any */
dsa_skb_tx_timestamp(p, skb);
if (dsa_realloc_skb(skb, dev)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* needed_tailroom should still be 'warm' in the cache line from
* dsa_realloc_skb(), which has also ensured that padding is safe.
*/
if (dev->needed_tailroom)
eth_skb_pad(skb);
/* Transmit function may have to reallocate the original SKB,
* in which case it must have freed it. Only free it here on error.
*/
nskb = p->xmit(skb, dev);
if (!nskb) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
return dsa_enqueue_skb(nskb, dev);
}
/* ethtool operations *******************************************************/
static void dsa_slave_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int dsa_slave_get_regs_len(struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_regs_len)
return ds->ops->get_regs_len(ds, dp->index);
return -EOPNOTSUPP;
}
static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_regs)
ds->ops->get_regs(ds, dp->index, regs, _p);
}
static int dsa_slave_nway_reset(struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
return phylink_ethtool_nway_reset(dp->pl);
}
static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->cd && ds->cd->eeprom_len)
return ds->cd->eeprom_len;
if (ds->ops->get_eeprom_len)
return ds->ops->get_eeprom_len(ds);
return 0;
}
static int dsa_slave_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eeprom)
return ds->ops->get_eeprom(ds, eeprom, data);
return -EOPNOTSUPP;
}
static int dsa_slave_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->set_eeprom)
return ds->ops->set_eeprom(ds, eeprom, data);
return -EOPNOTSUPP;
}
static void dsa_slave_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (stringset == ETH_SS_STATS) {
int len = ETH_GSTRING_LEN;
strscpy_pad(data, "tx_packets", len);
strscpy_pad(data + len, "tx_bytes", len);
strscpy_pad(data + 2 * len, "rx_packets", len);
strscpy_pad(data + 3 * len, "rx_bytes", len);
if (ds->ops->get_strings)
ds->ops->get_strings(ds, dp->index, stringset,
data + 4 * len);
} else if (stringset == ETH_SS_TEST) {
net_selftest_get_strings(data);
}
}
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
uint64_t *data)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct pcpu_sw_netstats *s;
unsigned int start;
int i;
for_each_possible_cpu(i) {
u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
s = per_cpu_ptr(dev->tstats, i);
do {
start = u64_stats_fetch_begin(&s->syncp);
tx_packets = u64_stats_read(&s->tx_packets);
tx_bytes = u64_stats_read(&s->tx_bytes);
rx_packets = u64_stats_read(&s->rx_packets);
rx_bytes = u64_stats_read(&s->rx_bytes);
} while (u64_stats_fetch_retry(&s->syncp, start));
data[0] += tx_packets;
data[1] += tx_bytes;
data[2] += rx_packets;
data[3] += rx_bytes;
}
if (ds->ops->get_ethtool_stats)
ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (sset == ETH_SS_STATS) {
int count = 0;
if (ds->ops->get_sset_count) {
count = ds->ops->get_sset_count(ds, dp->index, sset);
if (count < 0)
return count;
}
return count + 4;
} else if (sset == ETH_SS_TEST) {
return net_selftest_get_count();
}
return -EOPNOTSUPP;
}
static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
struct ethtool_eth_phy_stats *phy_stats)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_phy_stats)
ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}
static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
struct ethtool_eth_mac_stats *mac_stats)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_mac_stats)
ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}
static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_ctrl_stats)
ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}
static void
dsa_slave_get_rmon_stats(struct net_device *dev,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_rmon_stats)
ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}
static void dsa_slave_net_selftest(struct net_device *ndev,
struct ethtool_test *etest, u64 *buf)
{
struct dsa_port *dp = dsa_slave_to_port(ndev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->self_test) {
ds->ops->self_test(ds, dp->index, etest, buf);
return;
}
net_selftest(ndev, etest, buf);
}
static int dsa_slave_get_mm(struct net_device *dev,
struct ethtool_mm_state *state)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->get_mm)
return -EOPNOTSUPP;
return ds->ops->get_mm(ds, dp->index, state);
}
static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->set_mm)
return -EOPNOTSUPP;
return ds->ops->set_mm(ds, dp->index, cfg, extack);
}
static void dsa_slave_get_mm_stats(struct net_device *dev,
struct ethtool_mm_stats *stats)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_mm_stats)
ds->ops->get_mm_stats(ds, dp->index, stats);
}
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
phylink_ethtool_get_wol(dp->pl, w);
if (ds->ops->get_wol)
ds->ops->get_wol(ds, dp->index, w);
}
static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret = -EOPNOTSUPP;
phylink_ethtool_set_wol(dp->pl, w);
if (ds->ops->set_wol)
ret = ds->ops->set_wol(ds, dp->index, w);
return ret;
}
static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret;
/* Port's PHY and MAC both need to be EEE capable */
if (!dev->phydev || !dp->pl)
return -ENODEV;
if (!ds->ops->set_mac_eee)
return -EOPNOTSUPP;
ret = ds->ops->set_mac_eee(ds, dp->index, e);
if (ret)
return ret;
return phylink_ethtool_set_eee(dp->pl, e);
}
static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret;
/* Port's PHY and MAC both need to be EEE capable */
if (!dev->phydev || !dp->pl)
return -ENODEV;
if (!ds->ops->get_mac_eee)
return -EOPNOTSUPP;
ret = ds->ops->get_mac_eee(ds, dp->index, e);
if (ret)
return ret;
return phylink_ethtool_get_eee(dp->pl, e);
}
static int dsa_slave_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
return phylink_ethtool_ksettings_get(dp->pl, cmd);
}
static int dsa_slave_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
return phylink_ethtool_ksettings_set(dp->pl, cmd);
}
static void dsa_slave_get_pause_stats(struct net_device *dev,
struct ethtool_pause_stats *pause_stats)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_pause_stats)
ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}
static void dsa_slave_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
phylink_ethtool_get_pauseparam(dp->pl, pause);
}
static int dsa_slave_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
return phylink_ethtool_set_pauseparam(dp->pl, pause);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
struct netpoll_info *ni)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_slave_priv *p = netdev_priv(dev);
struct netpoll *netpoll;
int err = 0;
netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
if (!netpoll)
return -ENOMEM;
err = __netpoll_setup(netpoll, master);
if (err) {
kfree(netpoll);
goto out;
}
p->netpoll = netpoll;
out:
return err;
}
static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct netpoll *netpoll = p->netpoll;
if (!netpoll)
return;
p->netpoll = NULL;
__netpoll_free(netpoll);
}
static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_mall_tc_entry *mall_tc_entry;
list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
if (mall_tc_entry->cookie == cookie)
return mall_tc_entry;
return NULL;
}
static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
bool ingress)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_mall_mirror_tc_entry *mirror;
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
struct flow_action_entry *act;
struct dsa_port *to_dp;
int err;
if (!ds->ops->port_mirror_add)
return -EOPNOTSUPP;
if (!flow_action_basic_hw_stats_check(&cls->rule->action,
cls->common.extack))
return -EOPNOTSUPP;
act = &cls->rule->action.entries[0];
if (!act->dev)
return -EINVAL;
if (!dsa_slave_dev_check(act->dev))
return -EOPNOTSUPP;
mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
if (!mall_tc_entry)
return -ENOMEM;
mall_tc_entry->cookie = cls->cookie;
mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
mirror = &mall_tc_entry->mirror;
to_dp = dsa_slave_to_port(act->dev);
mirror->to_local_port = to_dp->index;
mirror->ingress = ingress;
err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
if (err) {
kfree(mall_tc_entry);
return err;
}
list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
return err;
}
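/* Illustrative sketch, not part of the original file: the shape of a
 * .port_mirror_add() implementation as invoked above. The signature is
 * inferred from the call site; the hardware programming is omitted.
 */
#if 0
static int example_port_mirror_add(struct dsa_switch *ds, int port,
				   struct dsa_mall_mirror_tc_entry *mirror,
				   bool ingress, struct netlink_ext_ack *extack)
{
	/* A real driver would configure @port as a mirror source towards
	 * mirror->to_local_port here, for the RX or TX direction
	 * depending on @ingress, and use @extack to report failures.
	 */
	return 0;
}
#endif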
static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
bool ingress)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_mall_policer_tc_entry *policer;
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
struct flow_action_entry *act;
int err;
if (!ds->ops->port_policer_add) {
NL_SET_ERR_MSG_MOD(extack,
"Policing offload not implemented");
return -EOPNOTSUPP;
}
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack,
"Only supported on ingress qdisc");
return -EOPNOTSUPP;
}
if (!flow_action_basic_hw_stats_check(&cls->rule->action,
cls->common.extack))
return -EOPNOTSUPP;
list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
NL_SET_ERR_MSG_MOD(extack,
"Only one port policer allowed");
return -EEXIST;
}
}
act = &cls->rule->action.entries[0];
mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
if (!mall_tc_entry)
return -ENOMEM;
mall_tc_entry->cookie = cls->cookie;
mall_tc_entry->type = DSA_PORT_MALL_POLICER;
policer = &mall_tc_entry->policer;
policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
policer->burst = act->police.burst;
err = ds->ops->port_policer_add(ds, dp->index, policer);
if (err) {
kfree(mall_tc_entry);
return err;
}
list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
return err;
}
static int dsa_slave_add_cls_matchall(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
bool ingress)
{
int err = -EOPNOTSUPP;
if (cls->common.protocol == htons(ETH_P_ALL) &&
flow_offload_has_one_action(&cls->rule->action) &&
cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
else if (flow_offload_has_one_action(&cls->rule->action) &&
cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
return err;
}
static void dsa_slave_del_cls_matchall(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
if (!mall_tc_entry)
return;
list_del(&mall_tc_entry->list);
switch (mall_tc_entry->type) {
case DSA_PORT_MALL_MIRROR:
if (ds->ops->port_mirror_del)
ds->ops->port_mirror_del(ds, dp->index,
&mall_tc_entry->mirror);
break;
case DSA_PORT_MALL_POLICER:
if (ds->ops->port_policer_del)
ds->ops->port_policer_del(ds, dp->index);
break;
default:
WARN_ON(1);
}
kfree(mall_tc_entry);
}
static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
bool ingress)
{
if (cls->common.chain_index)
return -EOPNOTSUPP;
switch (cls->command) {
case TC_CLSMATCHALL_REPLACE:
return dsa_slave_add_cls_matchall(dev, cls, ingress);
case TC_CLSMATCHALL_DESTROY:
dsa_slave_del_cls_matchall(dev, cls);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int dsa_slave_add_cls_flower(struct net_device *dev,
struct flow_cls_offload *cls,
bool ingress)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
if (!ds->ops->cls_flower_add)
return -EOPNOTSUPP;
return ds->ops->cls_flower_add(ds, port, cls, ingress);
}
static int dsa_slave_del_cls_flower(struct net_device *dev,
struct flow_cls_offload *cls,
bool ingress)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
if (!ds->ops->cls_flower_del)
return -EOPNOTSUPP;
return ds->ops->cls_flower_del(ds, port, cls, ingress);
}
static int dsa_slave_stats_cls_flower(struct net_device *dev,
struct flow_cls_offload *cls,
bool ingress)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
if (!ds->ops->cls_flower_stats)
return -EOPNOTSUPP;
return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}
static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
struct flow_cls_offload *cls,
bool ingress)
{
switch (cls->command) {
case FLOW_CLS_REPLACE:
return dsa_slave_add_cls_flower(dev, cls, ingress);
case FLOW_CLS_DESTROY:
return dsa_slave_del_cls_flower(dev, cls, ingress);
case FLOW_CLS_STATS:
return dsa_slave_stats_cls_flower(dev, cls, ingress);
default:
return -EOPNOTSUPP;
}
}
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv, bool ingress)
{
struct net_device *dev = cb_priv;
if (!tc_can_offload(dev))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSMATCHALL:
return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
case TC_SETUP_CLSFLOWER:
return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
default:
return -EOPNOTSUPP;
}
}
static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}
static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}
static LIST_HEAD(dsa_slave_block_cb_list);
static int dsa_slave_setup_tc_block(struct net_device *dev,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
cb = dsa_slave_setup_tc_block_cb_ig;
else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
cb = dsa_slave_setup_tc_block_cb_eg;
else
return -EOPNOTSUPP;
f->driver_block_list = &dsa_slave_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f->block, cb, dev);
if (!block_cb)
return -ENOENT;
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
void *type_data)
{
struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));
if (!master->netdev_ops->ndo_setup_tc)
return -EOPNOTSUPP;
return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}
static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
switch (type) {
case TC_SETUP_BLOCK:
return dsa_slave_setup_tc_block(dev, type_data);
case TC_SETUP_FT:
return dsa_slave_setup_ft_block(ds, dp->index, type_data);
default:
break;
}
if (!ds->ops->port_setup_tc)
return -EOPNOTSUPP;
return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}
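/* Illustrative sketch, not part of the original file: the shape of the
 * .port_setup_tc() callback reached through the fallthrough above. The
 * signature is inferred from the call site; the handled offload type and
 * the example_setup_taprio() helper are assumptions.
 */
#if 0
static int example_port_setup_tc(struct dsa_switch *ds, int port,
				 enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		/* type_data would be interpreted by the driver here */
		return example_setup_taprio(ds, port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
#endif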
static int dsa_slave_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->get_rxnfc)
return -EOPNOTSUPP;
return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}
static int dsa_slave_set_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->set_rxnfc)
return -EOPNOTSUPP;
return ds->ops->set_rxnfc(ds, dp->index, nfc);
}
static int dsa_slave_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *ts)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
if (!ds->ops->get_ts_info)
return -EOPNOTSUPP;
return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.vid = vid,
/* This API only allows programming tagged, non-PVID VIDs */
.flags = 0,
};
struct netlink_ext_ack extack = {0};
struct dsa_switch *ds = dp->ds;
struct netdev_hw_addr *ha;
struct dsa_vlan *v;
int ret;
/* User port... */
ret = dsa_port_vlan_add(dp, &vlan, &extack);
if (ret) {
if (extack._msg)
netdev_err(dev, "%s\n", extack._msg);
return ret;
}
/* And CPU port... */
ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
if (ret) {
if (extack._msg)
netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
extack._msg);
return ret;
}
if (!dsa_switch_supports_uc_filtering(ds) &&
!dsa_switch_supports_mc_filtering(ds))
return 0;
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v) {
ret = -ENOMEM;
goto rollback;
}
netif_addr_lock_bh(dev);
v->vid = vid;
list_add_tail(&v->list, &dp->user_vlans);
if (dsa_switch_supports_mc_filtering(ds)) {
netdev_for_each_synced_mc_addr(ha, dev) {
dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD,
ha->addr, vid);
}
}
if (dsa_switch_supports_uc_filtering(ds)) {
netdev_for_each_synced_uc_addr(ha, dev) {
dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD,
ha->addr, vid);
}
}
netif_addr_unlock_bh(dev);
dsa_flush_workqueue();
return 0;
rollback:
dsa_port_host_vlan_del(dp, &vlan);
dsa_port_vlan_del(dp, &vlan);
return ret;
}
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.vid = vid,
/* This API only allows programming tagged, non-PVID VIDs */
.flags = 0,
};
struct dsa_switch *ds = dp->ds;
struct netdev_hw_addr *ha;
struct dsa_vlan *v;
int err;
err = dsa_port_vlan_del(dp, &vlan);
if (err)
return err;
err = dsa_port_host_vlan_del(dp, &vlan);
if (err)
return err;
if (!dsa_switch_supports_uc_filtering(ds) &&
!dsa_switch_supports_mc_filtering(ds))
return 0;
netif_addr_lock_bh(dev);
v = dsa_vlan_find(&dp->user_vlans, &vlan);
if (!v) {
netif_addr_unlock_bh(dev);
return -ENOENT;
}
list_del(&v->list);
kfree(v);
if (dsa_switch_supports_mc_filtering(ds)) {
netdev_for_each_synced_mc_addr(ha, dev) {
dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL,
ha->addr, vid);
}
}
if (dsa_switch_supports_uc_filtering(ds)) {
netdev_for_each_synced_uc_addr(ha, dev) {
dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL,
ha->addr, vid);
}
}
netif_addr_unlock_bh(dev);
dsa_flush_workqueue();
return 0;
}
static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
}
static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
}
/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
* filtering is enabled. The baseline is that only ports that offload a
* VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
* but there are exceptions for quirky hardware.
*
* If ds->vlan_filtering_is_global = true, then standalone ports which share
* the same switch with other ports that offload a VLAN-aware bridge are also
* inevitably VLAN-aware.
*
* To summarize, a DSA switch port offloads:
*
* - If standalone (this includes software bridge, software LAG):
* - if ds->needs_standalone_vlan_filtering = true, OR if
* (ds->vlan_filtering_is_global = true AND there are bridges spanning
* this switch chip which have vlan_filtering=1)
* - the 8021q upper VLANs
* - else (standalone VLAN filtering is not needed, VLAN filtering is not
* global, or it is, but no port is under a VLAN-aware bridge):
* - no VLAN (any 8021q upper is a software VLAN)
*
* - If under a vlan_filtering=0 bridge which it offloads:
* - if ds->configure_vlan_while_not_filtering = true (default):
* - the bridge VLANs. These VLANs are committed to hardware but inactive.
* - else (deprecated):
* - no VLAN. The bridge VLANs are not restored when VLAN awareness is
* enabled, so this behavior is broken and discouraged.
*
* - If under a vlan_filtering=1 bridge which it offloads:
* - the bridge VLANs
* - the 8021q upper VLANs
*/
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
bool vlan_filtering)
{
int err;
if (vlan_filtering) {
slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
if (err) {
vlan_for_each(slave, dsa_slave_clear_vlan, slave);
slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
return err;
}
} else {
err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
if (err)
return err;
slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
return 0;
}
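/* Illustrative sketch, not part of the original file: where the flags from
 * the policy comment above typically come from. A driver sets them in its
 * dsa_switch_ops .setup() callback; example_setup() is hypothetical, while
 * the two struct dsa_switch fields are the ones named in the comment.
 */
#if 0
static int example_setup(struct dsa_switch *ds)
{
	/* VLAN awareness is a chip-wide setting on this (imaginary) switch */
	ds->vlan_filtering_is_global = true;
	/* Standalone ports must still commit their 8021q uppers to hardware */
	ds->needs_standalone_vlan_filtering = true;

	return 0;
}
#endif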
struct dsa_hw_port {
struct list_head list;
struct net_device *dev;
int old_mtu;
};
static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
const struct dsa_hw_port *p;
int err;
list_for_each_entry(p, hw_port_list, list) {
if (p->dev->mtu == mtu)
continue;
err = dev_set_mtu(p->dev, mtu);
if (err)
goto rollback;
}
return 0;
rollback:
list_for_each_entry_continue_reverse(p, hw_port_list, list) {
if (p->dev->mtu == p->old_mtu)
continue;
if (dev_set_mtu(p->dev, p->old_mtu))
netdev_err(p->dev, "Failed to restore MTU\n");
}
return err;
}
static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
struct dsa_hw_port *p, *n;
list_for_each_entry_safe(p, n, hw_port_list, list)
kfree(p);
}
/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
struct list_head hw_port_list;
struct dsa_switch_tree *dst;
int min_mtu = ETH_MAX_MTU;
struct dsa_port *other_dp;
int err;
if (!dp->ds->mtu_enforcement_ingress)
return;
if (!dp->bridge)
return;
INIT_LIST_HEAD(&hw_port_list);
/* Populate the list of ports that are part of the same bridge
* as the newly added/modified port
*/
list_for_each_entry(dst, &dsa_tree_list, list) {
list_for_each_entry(other_dp, &dst->ports, list) {
struct dsa_hw_port *hw_port;
struct net_device *slave;
if (other_dp->type != DSA_PORT_TYPE_USER)
continue;
if (!dsa_port_bridge_same(dp, other_dp))
continue;
if (!other_dp->ds->mtu_enforcement_ingress)
continue;
slave = other_dp->slave;
if (min_mtu > slave->mtu)
min_mtu = slave->mtu;
hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
if (!hw_port)
goto out;
hw_port->dev = slave;
hw_port->old_mtu = slave->mtu;
list_add(&hw_port->list, &hw_port_list);
}
}
/* Attempt to configure the entire hardware bridge to the newly added
* interface's MTU first, regardless of whether the intention of the
* user was to raise or lower it.
*/
err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
if (!err)
goto out;
/* Clearly that didn't work out so well, so just set the minimum MTU on
* all hardware bridge ports now. If this fails too, then all ports will
* still have their old MTU rolled back anyway.
*/
dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
out:
dsa_hw_port_list_free(&hw_port_list);
}
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_port *cpu_dp = dp->cpu_dp;
struct dsa_switch *ds = dp->ds;
struct dsa_port *other_dp;
int largest_mtu = 0;
int new_master_mtu;
int old_master_mtu;
int mtu_limit;
int overhead;
int cpu_mtu;
int err;
if (!ds->ops->port_change_mtu)
return -EOPNOTSUPP;
dsa_tree_for_each_user_port(other_dp, ds->dst) {
int slave_mtu;
/* During probe, this function will be called for each slave
* device, while not all of them have been allocated. That's
* ok, it doesn't change what the maximum is, so ignore it.
*/
if (!other_dp->slave)
continue;
/* Pretend that we already applied the setting, which we
* actually haven't (still haven't done all integrity checks)
*/
if (dp == other_dp)
slave_mtu = new_mtu;
else
slave_mtu = other_dp->slave->mtu;
if (largest_mtu < slave_mtu)
largest_mtu = slave_mtu;
}
overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
old_master_mtu = master->mtu;
new_master_mtu = largest_mtu + overhead;
if (new_master_mtu > mtu_limit)
return -ERANGE;
/* If the master MTU isn't over the limit, there's no need to check the CPU
* MTU, since that surely isn't either.
*/
cpu_mtu = largest_mtu;
/* Start applying stuff */
if (new_master_mtu != old_master_mtu) {
err = dev_set_mtu(master, new_master_mtu);
if (err < 0)
goto out_master_failed;
/* We only need to propagate the MTU of the CPU port to
* upstream switches, so emit a notifier which updates them.
*/
err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
if (err)
goto out_cpu_failed;
}
err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
if (err)
goto out_port_failed;
dev->mtu = new_mtu;
dsa_bridge_mtu_normalization(dp);
return 0;
out_port_failed:
if (new_master_mtu != old_master_mtu)
dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
out_cpu_failed:
if (new_master_mtu != old_master_mtu)
dev_set_mtu(master, old_master_mtu);
out_master_failed:
return err;
}
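/* Illustrative sketch, not part of the original file: the two MTU callbacks
 * consumed by dsa_slave_change_mtu() above, with signatures inferred from
 * the call sites. The 9000-byte limit is made up and the register writes
 * are omitted. A driver that additionally sets
 * ds->mtu_enforcement_ingress = true also gets the bridge-wide
 * normalization performed by dsa_bridge_mtu_normalization().
 */
#if 0
static int example_port_max_mtu(struct dsa_switch *ds, int port)
{
	/* Largest L2 payload the (imaginary) port accepts, excluding the
	 * tagging protocol overhead that the core adds for the CPU port.
	 */
	return 9000;
}

static int example_port_change_mtu(struct dsa_switch *ds, int port,
				   int new_mtu)
{
	/* A real driver would program the port's maximum frame length,
	 * typically new_mtu plus the Ethernet header and FCS.
	 */
	return 0;
}
#endif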
static int __maybe_unused
dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
if (!ds->ops->port_set_default_prio)
return -EOPNOTSUPP;
err = dcb_ieee_setapp(dev, app);
if (err)
return err;
mask = dcb_ieee_getapp_mask(dev, app);
new_prio = __fls(mask);
err = ds->ops->port_set_default_prio(ds, port, new_prio);
if (err) {
dcb_ieee_delapp(dev, app);
return err;
}
return 0;
}
static int __maybe_unused
dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
u8 dscp = app->protocol;
if (!ds->ops->port_add_dscp_prio)
return -EOPNOTSUPP;
if (dscp >= 64) {
netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
dscp);
return -EINVAL;
}
err = dcb_ieee_setapp(dev, app);
if (err)
return err;
mask = dcb_ieee_getapp_mask(dev, app);
new_prio = __fls(mask);
err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
if (err) {
dcb_ieee_delapp(dev, app);
return err;
}
return 0;
}
static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
switch (app->selector) {
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
switch (app->protocol) {
case 0:
return dsa_slave_dcbnl_set_default_prio(dev, app);
default:
return -EOPNOTSUPP;
}
break;
case IEEE_8021QAZ_APP_SEL_DSCP:
return dsa_slave_dcbnl_add_dscp_prio(dev, app);
default:
return -EOPNOTSUPP;
}
}
static int __maybe_unused
dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
if (!ds->ops->port_set_default_prio)
return -EOPNOTSUPP;
err = dcb_ieee_delapp(dev, app);
if (err)
return err;
mask = dcb_ieee_getapp_mask(dev, app);
new_prio = mask ? __fls(mask) : 0;
err = ds->ops->port_set_default_prio(ds, port, new_prio);
if (err) {
dcb_ieee_setapp(dev, app);
return err;
}
return 0;
}
static int __maybe_unused
dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int err, port = dp->index;
u8 dscp = app->protocol;
if (!ds->ops->port_del_dscp_prio)
return -EOPNOTSUPP;
err = dcb_ieee_delapp(dev, app);
if (err)
return err;
err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
if (err) {
dcb_ieee_setapp(dev, app);
return err;
}
return 0;
}
static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
struct dcb_app *app)
{
switch (app->selector) {
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
switch (app->protocol) {
case 0:
return dsa_slave_dcbnl_del_default_prio(dev, app);
default:
return -EOPNOTSUPP;
}
break;
case IEEE_8021QAZ_APP_SEL_DSCP:
return dsa_slave_dcbnl_del_dscp_prio(dev, app);
default:
return -EOPNOTSUPP;
}
}
/* Pre-populate the DCB application priority table with the priorities
* configured during switch setup, which we read from hardware here.
*/
static int dsa_slave_dcbnl_init(struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
int err;
if (ds->ops->port_get_default_prio) {
int prio = ds->ops->port_get_default_prio(ds, port);
struct dcb_app app = {
.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
.protocol = 0,
.priority = prio,
};
if (prio < 0)
return prio;
err = dcb_ieee_setapp(dev, &app);
if (err)
return err;
}
if (ds->ops->port_get_dscp_prio) {
int protocol;
for (protocol = 0; protocol < 64; protocol++) {
struct dcb_app app = {
.selector = IEEE_8021QAZ_APP_SEL_DSCP,
.protocol = protocol,
};
int prio;
prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
if (prio == -EOPNOTSUPP)
continue;
if (prio < 0)
return prio;
app.priority = prio;
err = dcb_ieee_setapp(dev, &app);
if (err)
return err;
}
}
return 0;
}
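/* Illustrative sketch, not part of the original file: a trivial
 * .port_get_default_prio() as consumed by dsa_slave_dcbnl_init() above.
 * The signature is inferred from the call site; a real driver would read
 * the port's default priority from hardware instead of returning 0.
 */
#if 0
static int example_port_get_default_prio(struct dsa_switch *ds, int port)
{
	return 0;	/* unmatched traffic maps to priority 0 */
}
#endif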
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_drvinfo = dsa_slave_get_drvinfo,
.get_regs_len = dsa_slave_get_regs_len,
.get_regs = dsa_slave_get_regs,
.nway_reset = dsa_slave_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = dsa_slave_get_eeprom_len,
.get_eeprom = dsa_slave_get_eeprom,
.set_eeprom = dsa_slave_set_eeprom,
.get_strings = dsa_slave_get_strings,
.get_ethtool_stats = dsa_slave_get_ethtool_stats,
.get_sset_count = dsa_slave_get_sset_count,
.get_eth_phy_stats = dsa_slave_get_eth_phy_stats,
.get_eth_mac_stats = dsa_slave_get_eth_mac_stats,
.get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats,
.get_rmon_stats = dsa_slave_get_rmon_stats,
.set_wol = dsa_slave_set_wol,
.get_wol = dsa_slave_get_wol,
.set_eee = dsa_slave_set_eee,
.get_eee = dsa_slave_get_eee,
.get_link_ksettings = dsa_slave_get_link_ksettings,
.set_link_ksettings = dsa_slave_set_link_ksettings,
.get_pause_stats = dsa_slave_get_pause_stats,
.get_pauseparam = dsa_slave_get_pauseparam,
.set_pauseparam = dsa_slave_set_pauseparam,
.get_rxnfc = dsa_slave_get_rxnfc,
.set_rxnfc = dsa_slave_set_rxnfc,
.get_ts_info = dsa_slave_get_ts_info,
.self_test = dsa_slave_net_selftest,
.get_mm = dsa_slave_get_mm,
.set_mm = dsa_slave_set_mm,
.get_mm_stats = dsa_slave_get_mm_stats,
};
static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
.ieee_setapp = dsa_slave_dcbnl_ieee_setapp,
.ieee_delapp = dsa_slave_dcbnl_ieee_delapp,
};
static void dsa_slave_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_stats64)
ds->ops->get_stats64(ds, dp->index, s);
else
dev_get_tstats64(dev, s);
}
static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
struct net_device_path *path)
{
struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
struct net_device *master = dsa_port_to_master(dp);
struct dsa_port *cpu_dp = dp->cpu_dp;
path->dev = ctx->dev;
path->type = DEV_PATH_DSA;
path->dsa.proto = cpu_dp->tag_ops->proto;
path->dsa.port = dp->index;
ctx->dev = master;
return 0;
}
static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_open = dsa_slave_open,
.ndo_stop = dsa_slave_close,
.ndo_start_xmit = dsa_slave_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_fdb_dump = dsa_slave_fdb_dump,
.ndo_eth_ioctl = dsa_slave_ioctl,
.ndo_get_iflink = dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = dsa_slave_netpoll_setup,
.ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
.ndo_poll_controller = dsa_slave_poll_controller,
#endif
.ndo_setup_tc = dsa_slave_setup_tc,
.ndo_get_stats64 = dsa_slave_get_stats64,
.ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
.ndo_change_mtu = dsa_slave_change_mtu,
.ndo_fill_forward_path = dsa_slave_fill_forward_path,
};
static struct device_type dsa_type = {
.name = "dsa",
};
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
const struct dsa_port *dp = dsa_to_port(ds, port);
if (dp->pl)
phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
struct phylink_link_state *state)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
/* No need to check that this operation is valid; the callback would
* not be called if it was not.
*/
ds->ops->phylink_fixed_state(ds, dp->index, state);
}
/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
u32 flags)
{
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct dsa_switch *ds = dp->ds;
slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
if (!slave_dev->phydev) {
netdev_err(slave_dev, "no phy at %d\n", addr);
return -ENODEV;
}
slave_dev->phydev->dev_flags |= flags;
return phylink_connect_phy(dp->pl, slave_dev->phydev);
}
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct device_node *port_dn = dp->dn;
struct dsa_switch *ds = dp->ds;
u32 phy_flags = 0;
int ret;
dp->pl_config.dev = &slave_dev->dev;
dp->pl_config.type = PHYLINK_NETDEV;
/* The get_fixed_state callback takes precedence over polling the
* link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
* this if the switch provides such a callback.
*/
if (ds->ops->phylink_fixed_state) {
dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
dp->pl_config.poll_fixed_state = true;
}
ret = dsa_port_phylink_create(dp);
if (ret)
return ret;
if (ds->ops->get_phy_flags)
phy_flags = ds->ops->get_phy_flags(ds, dp->index);
ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
if (ret == -ENODEV && ds->slave_mii_bus) {
/* We could not connect to a designated PHY or SFP, so try to
* use the switch internal MDIO bus instead
*/
ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
}
if (ret) {
netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
ERR_PTR(ret));
dsa_port_phylink_destroy(dp);
}
return ret;
}
void dsa_slave_setup_tagger(struct net_device *slave)
{
struct dsa_port *dp = dsa_slave_to_port(slave);
struct net_device *master = dsa_port_to_master(dp);
struct dsa_slave_priv *p = netdev_priv(slave);
const struct dsa_port *cpu_dp = dp->cpu_dp;
const struct dsa_switch *ds = dp->ds;
slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
/* Try to save one extra realloc later in the TX path (in the master)
* by also inheriting the master's needed headroom and tailroom.
* The 8021q driver also does this.
*/
slave->needed_headroom += master->needed_headroom;
slave->needed_tailroom += master->needed_tailroom;
p->xmit = cpu_dp->tag_ops->xmit;
slave->features = master->vlan_features | NETIF_F_HW_TC;
slave->hw_features |= NETIF_F_HW_TC;
slave->features |= NETIF_F_LLTX;
if (slave->needed_tailroom)
slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
if (ds->needs_standalone_vlan_filtering)
slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
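/* Worked example for the headroom math above (all numbers hypothetical):
 * with a tagging protocol declaring needed_headroom = 8 and
 * needed_tailroom = 0, stacked on a master with needed_headroom = 16 and
 * needed_tailroom = 4, the slave ends up with needed_headroom = 8 + 16 = 24
 * and needed_tailroom = 0 + 4 = 4, so the master usually does not have to
 * reallocate the skb on transmit.
 */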
int dsa_slave_suspend(struct net_device *slave_dev)
{
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
if (!netif_running(slave_dev))
return 0;
netif_device_detach(slave_dev);
rtnl_lock();
phylink_stop(dp->pl);
rtnl_unlock();
return 0;
}
int dsa_slave_resume(struct net_device *slave_dev)
{
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
if (!netif_running(slave_dev))
return 0;
netif_device_attach(slave_dev);
rtnl_lock();
phylink_start(dp->pl);
rtnl_unlock();
return 0;
}
int dsa_slave_create(struct dsa_port *port)
{
struct net_device *master = dsa_port_to_master(port);
struct dsa_switch *ds = port->ds;
struct net_device *slave_dev;
struct dsa_slave_priv *p;
const char *name;
int assign_type;
int ret;
if (!ds->num_tx_queues)
ds->num_tx_queues = 1;
if (port->name) {
name = port->name;
assign_type = NET_NAME_PREDICTABLE;
} else {
name = "eth%d";
assign_type = NET_NAME_ENUM;
}
slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
assign_type, ether_setup,
ds->num_tx_queues, 1);
if (slave_dev == NULL)
return -ENOMEM;
slave_dev->rtnl_link_ops = &dsa_link_ops;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
#endif
if (!is_zero_ether_addr(port->mac))
eth_hw_addr_set(slave_dev, port->mac);
else
eth_hw_addr_inherit(slave_dev, master);
slave_dev->priv_flags |= IFF_NO_QUEUE;
if (dsa_switch_supports_uc_filtering(ds))
slave_dev->priv_flags |= IFF_UNICAST_FLT;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
if (ds->ops->port_max_mtu)
slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
SET_NETDEV_DEV(slave_dev, port->ds->dev);
SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
slave_dev->dev.of_node = port->dn;
slave_dev->vlan_features = master->vlan_features;
p = netdev_priv(slave_dev);
slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!slave_dev->tstats) {
free_netdev(slave_dev);
return -ENOMEM;
}
ret = gro_cells_init(&p->gcells, slave_dev);
if (ret)
goto out_free;
p->dp = port;
INIT_LIST_HEAD(&p->mall_tc_list);
port->slave = slave_dev;
dsa_slave_setup_tagger(slave_dev);
netif_carrier_off(slave_dev);
ret = dsa_slave_phy_setup(slave_dev);
if (ret) {
netdev_err(slave_dev,
"error %d setting up PHY for tree %d, switch %d, port %d\n",
ret, ds->dst->index, ds->index, port->index);
goto out_gcells;
}
rtnl_lock();
ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
if (ret && ret != -EOPNOTSUPP)
dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
ret, ETH_DATA_LEN, port->index);
ret = register_netdevice(slave_dev);
if (ret) {
netdev_err(master, "error %d registering interface %s\n",
ret, slave_dev->name);
rtnl_unlock();
goto out_phy;
}
if (IS_ENABLED(CONFIG_DCB)) {
ret = dsa_slave_dcbnl_init(slave_dev);
if (ret) {
netdev_err(slave_dev,
"failed to initialize DCB: %pe\n",
ERR_PTR(ret));
rtnl_unlock();
goto out_unregister;
}
}
ret = netdev_upper_dev_link(master, slave_dev, NULL);
rtnl_unlock();
if (ret)
goto out_unregister;
return 0;
out_unregister:
unregister_netdev(slave_dev);
out_phy:
rtnl_lock();
phylink_disconnect_phy(p->dp->pl);
rtnl_unlock();
dsa_port_phylink_destroy(p->dp);
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
free_percpu(slave_dev->tstats);
free_netdev(slave_dev);
port->slave = NULL;
return ret;
}
void dsa_slave_destroy(struct net_device *slave_dev)
{
struct net_device *master = dsa_slave_to_master(slave_dev);
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct dsa_slave_priv *p = netdev_priv(slave_dev);
netif_carrier_off(slave_dev);
rtnl_lock();
netdev_upper_dev_unlink(master, slave_dev);
unregister_netdevice(slave_dev);
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
dsa_port_phylink_destroy(dp);
gro_cells_destroy(&p->gcells);
free_percpu(slave_dev->tstats);
free_netdev(slave_dev);
}
int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
struct netlink_ext_ack *extack)
{
struct net_device *old_master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct net_device *upper;
struct list_head *iter;
int err;
if (master == old_master)
return 0;
if (!ds->ops->port_change_master) {
NL_SET_ERR_MSG_MOD(extack,
"Driver does not support changing DSA master");
return -EOPNOTSUPP;
}
if (!netdev_uses_dsa(master)) {
NL_SET_ERR_MSG_MOD(extack,
"Interface not eligible as DSA master");
return -EOPNOTSUPP;
}
netdev_for_each_upper_dev_rcu(master, upper, iter) {
if (dsa_slave_dev_check(upper))
continue;
if (netif_is_bridge_master(upper))
continue;
NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers");
return -EOPNOTSUPP;
}
/* Since we allow live-changing the DSA master, and we auto-open the
* DSA master when the user port opens, we need to ensure that the
* new DSA master is open too.
*/
if (dev->flags & IFF_UP) {
err = dev_open(master, extack);
if (err)
return err;
}
netdev_upper_dev_unlink(old_master, dev);
err = netdev_upper_dev_link(master, dev, extack);
if (err)
goto out_revert_old_master_unlink;
err = dsa_port_change_master(dp, master, extack);
if (err)
goto out_revert_master_link;
/* Update the MTU of the new CPU port through cross-chip notifiers */
err = dsa_slave_change_mtu(dev, dev->mtu);
if (err && err != -EOPNOTSUPP) {
netdev_warn(dev,
"nonfatal error updating MTU with new master: %pe\n",
ERR_PTR(err));
}
/* If the port doesn't have its own MAC address and relies on the DSA
* master's one, inherit it again from the new DSA master.
*/
if (is_zero_ether_addr(dp->mac))
eth_hw_addr_inherit(dev, master);
return 0;
out_revert_master_link:
netdev_upper_dev_unlink(master, dev);
out_revert_old_master_unlink:
netdev_upper_dev_link(old_master, dev, NULL);
return err;
}
bool dsa_slave_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &dsa_slave_netdev_ops;
}
EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
static int dsa_slave_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
if (!dsa_slave_dev_check(dev))
return err;
extack = netdev_notifier_info_to_extack(&info->info);
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking) {
err = dsa_port_bridge_join(dp, info->upper_dev, extack);
if (!err)
dsa_bridge_mtu_normalization(dp);
if (err == -EOPNOTSUPP) {
NL_SET_ERR_MSG_WEAK_MOD(extack,
"Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
} else {
dsa_port_bridge_leave(dp, info->upper_dev);
err = NOTIFY_OK;
}
} else if (netif_is_lag_master(info->upper_dev)) {
if (info->linking) {
err = dsa_port_lag_join(dp, info->upper_dev,
info->upper_info, extack);
if (err == -EOPNOTSUPP) {
NL_SET_ERR_MSG_WEAK_MOD(extack,
"Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
} else {
dsa_port_lag_leave(dp, info->upper_dev);
err = NOTIFY_OK;
}
} else if (is_hsr_master(info->upper_dev)) {
if (info->linking) {
err = dsa_port_hsr_join(dp, info->upper_dev);
if (err == -EOPNOTSUPP) {
NL_SET_ERR_MSG_WEAK_MOD(extack,
"Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
} else {
dsa_port_hsr_leave(dp, info->upper_dev);
err = NOTIFY_OK;
}
}
return err;
}
static int dsa_slave_prechangeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
if (!dsa_slave_dev_check(dev))
return NOTIFY_DONE;
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
dsa_port_pre_bridge_leave(dp, info->upper_dev);
else if (netif_is_lag_master(info->upper_dev) && !info->linking)
dsa_port_pre_lag_leave(dp, info->upper_dev);
/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
* meaningfully enslaved to a bridge yet
*/
return NOTIFY_DONE;
}
static int
dsa_slave_lag_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *lower;
struct list_head *iter;
int err = NOTIFY_DONE;
struct dsa_port *dp;
if (!netif_is_lag_master(dev))
return err;
netdev_for_each_lower_dev(dev, lower, iter) {
if (!dsa_slave_dev_check(lower))
continue;
dp = dsa_slave_to_port(lower);
if (!dp->lag)
/* Software LAG */
continue;
err = dsa_slave_changeupper(lower, info);
if (notifier_to_errno(err))
break;
}
return err;
}
/* Same as dsa_slave_lag_changeupper() except that it calls
* dsa_slave_prechangeupper()
*/
static int
dsa_slave_lag_prechangeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *lower;
struct list_head *iter;
int err = NOTIFY_DONE;
struct dsa_port *dp;
if (!netif_is_lag_master(dev))
return err;
netdev_for_each_lower_dev(dev, lower, iter) {
if (!dsa_slave_dev_check(lower))
continue;
dp = dsa_slave_to_port(lower);
if (!dp->lag)
/* Software LAG */
continue;
err = dsa_slave_prechangeupper(lower, info);
if (notifier_to_errno(err))
break;
}
return err;
}
static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *ext_ack;
struct net_device *slave, *br;
struct dsa_port *dp;
ext_ack = netdev_notifier_info_to_extack(&info->info);
if (!is_vlan_dev(dev))
return NOTIFY_DONE;
slave = vlan_dev_real_dev(dev);
if (!dsa_slave_dev_check(slave))
return NOTIFY_DONE;
dp = dsa_slave_to_port(slave);
br = dsa_port_bridge_dev_get(dp);
if (!br)
return NOTIFY_DONE;
/* Deny enslaving a VLAN device into a VLAN-aware bridge */
if (br_vlan_enabled(br) &&
netif_is_bridge_master(info->upper_dev) && info->linking) {
NL_SET_ERR_MSG_MOD(ext_ack,
"Cannot enslave VLAN device into VLAN aware bridge");
return notifier_from_errno(-EINVAL);
}
return NOTIFY_DONE;
}
static int
dsa_slave_check_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct bridge_vlan_info br_info;
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
u16 vid;
if (!br || !br_vlan_enabled(br))
return NOTIFY_DONE;
extack = netdev_notifier_info_to_extack(&info->info);
vid = vlan_dev_vlan_id(info->upper_dev);
/* br_vlan_get_info() returns -EINVAL if the device is not found
* and -ENOENT if the VID is not found. A return of 0 means the
* VLAN already exists on the bridge, which is a failure for us
* here.
*/
err = br_vlan_get_info(br, vid, &br_info);
if (err == 0) {
NL_SET_ERR_MSG_MOD(extack,
"This VLAN is already configured by the bridge");
return notifier_from_errno(-EBUSY);
}
return NOTIFY_DONE;
}
static int
dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct dsa_switch *ds;
struct dsa_port *dp;
int err;
if (!dsa_slave_dev_check(dev))
return dsa_prevent_bridging_8021q_upper(dev, info);
dp = dsa_slave_to_port(dev);
ds = dp->ds;
if (ds->ops->port_prechangeupper) {
err = ds->ops->port_prechangeupper(ds, dp->index, info);
if (err)
return notifier_from_errno(err);
}
if (is_vlan_dev(info->upper_dev))
return dsa_slave_check_8021q_upper(dev, info);
return NOTIFY_DONE;
}
/* To be eligible as a DSA master, a LAG must have all of its lower
* interfaces be eligible DSA masters. Additionally, all LAG slaves must be
* DSA masters of
* switches in the same switch tree.
*/
static int dsa_lag_master_validate(struct net_device *lag_dev,
struct netlink_ext_ack *extack)
{
struct net_device *lower1, *lower2;
struct list_head *iter1, *iter2;
netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
if (!netdev_uses_dsa(lower1) ||
!netdev_uses_dsa(lower2)) {
NL_SET_ERR_MSG_MOD(extack,
"All LAG ports must be eligible as DSA masters");
return notifier_from_errno(-EINVAL);
}
if (lower1 == lower2)
continue;
if (!dsa_port_tree_same(lower1->dsa_ptr,
lower2->dsa_ptr)) {
NL_SET_ERR_MSG_MOD(extack,
"LAG contains DSA masters of disjoint switch trees");
return notifier_from_errno(-EINVAL);
}
}
}
return NOTIFY_DONE;
}
static int
dsa_master_prechangeupper_sanity_check(struct net_device *master,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
if (!netdev_uses_dsa(master))
return NOTIFY_DONE;
if (!info->linking)
return NOTIFY_DONE;
/* Allow DSA switch uppers */
if (dsa_slave_dev_check(info->upper_dev))
return NOTIFY_DONE;
/* Allow bridge uppers of DSA masters, subject to further
* restrictions in dsa_bridge_prechangelower_sanity_check()
*/
if (netif_is_bridge_master(info->upper_dev))
return NOTIFY_DONE;
/* Allow LAG uppers, subject to further restrictions in
* dsa_lag_master_prechangelower_sanity_check()
*/
if (netif_is_lag_master(info->upper_dev))
return dsa_lag_master_validate(info->upper_dev, extack);
NL_SET_ERR_MSG_MOD(extack,
"DSA master cannot join unknown upper interfaces");
return notifier_from_errno(-EBUSY);
}
static int
dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
struct net_device *lag_dev = info->upper_dev;
struct net_device *lower;
struct list_head *iter;
if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
return NOTIFY_DONE;
if (!info->linking)
return NOTIFY_DONE;
if (!netdev_uses_dsa(dev)) {
NL_SET_ERR_MSG(extack,
"Only DSA masters can join a LAG DSA master");
return notifier_from_errno(-EINVAL);
}
netdev_for_each_lower_dev(lag_dev, lower, iter) {
if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
NL_SET_ERR_MSG(extack,
"Interface is DSA master for a different switch tree than this LAG");
return notifier_from_errno(-EINVAL);
}
break;
}
return NOTIFY_DONE;
}
/* Don't allow bridging of DSA masters, since the bridge layer rx_handler
* prevents the DSA fake ethertype handler from being invoked, so we don't get the
* chance to strip off and parse the DSA switch tag protocol header (the bridge
* layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
* frames).
* The only case where that would not be an issue is when bridging can already
* be offloaded, such as when the DSA master is itself a DSA or plain switchdev
* port, and is bridged only with other ports from the same hardware device.
*/
static int
dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *br = info->upper_dev;
struct netlink_ext_ack *extack;
struct net_device *lower;
struct list_head *iter;
if (!netif_is_bridge_master(br))
return NOTIFY_DONE;
if (!info->linking)
return NOTIFY_DONE;
extack = netdev_notifier_info_to_extack(&info->info);
netdev_for_each_lower_dev(br, lower, iter) {
if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
continue;
if (!netdev_port_same_parent_id(lower, new_lower)) {
NL_SET_ERR_MSG(extack,
"Cannot do software bridging with a DSA master");
return notifier_from_errno(-EINVAL);
}
}
return NOTIFY_DONE;
}
static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
struct net_device *lag_dev)
{
struct net_device *new_master = dsa_tree_find_first_master(dst);
struct dsa_port *dp;
int err;
dsa_tree_for_each_user_port(dp, dst) {
if (dsa_port_to_master(dp) != lag_dev)
continue;
err = dsa_slave_change_master(dp->slave, new_master, NULL);
if (err) {
netdev_err(dp->slave,
"failed to restore master to %s: %pe\n",
new_master->name, ERR_PTR(err));
}
}
}
static int dsa_master_lag_join(struct net_device *master,
struct net_device *lag_dev,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack)
{
struct dsa_port *cpu_dp = master->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *dp;
int err;
err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
if (err)
return err;
dsa_tree_for_each_user_port(dp, dst) {
if (dsa_port_to_master(dp) != master)
continue;
err = dsa_slave_change_master(dp->slave, lag_dev, extack);
if (err)
goto restore;
}
return 0;
restore:
dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
if (dsa_port_to_master(dp) != lag_dev)
continue;
err = dsa_slave_change_master(dp->slave, master, NULL);
if (err) {
netdev_err(dp->slave,
"failed to restore master to %s: %pe\n",
master->name, ERR_PTR(err));
}
}
dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
return err;
}
static void dsa_master_lag_leave(struct net_device *master,
struct net_device *lag_dev)
{
struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *new_cpu_dp = NULL;
struct net_device *lower;
struct list_head *iter;
netdev_for_each_lower_dev(lag_dev, lower, iter) {
if (netdev_uses_dsa(lower)) {
new_cpu_dp = lower->dsa_ptr;
break;
}
}
if (new_cpu_dp) {
/* Update the CPU port of the user ports still under the LAG
* so that dsa_port_to_master() continues to work properly
*/
dsa_tree_for_each_user_port(dp, dst)
if (dsa_port_to_master(dp) == lag_dev)
dp->cpu_dp = new_cpu_dp;
/* Update the index of the virtual CPU port to match the lowest
* physical CPU port
*/
lag_dev->dsa_ptr = new_cpu_dp;
wmb();
} else {
/* If the LAG DSA master has no ports left, migrate back all
* user ports to the first physical CPU port
*/
dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
}
/* This DSA master has left its LAG in any case, so let
* the CPU port leave the hardware LAG as well
*/
dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
}
static int dsa_master_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
if (!netdev_uses_dsa(dev))
return err;
extack = netdev_notifier_info_to_extack(&info->info);
if (netif_is_lag_master(info->upper_dev)) {
if (info->linking) {
err = dsa_master_lag_join(dev, info->upper_dev,
info->upper_info, extack);
err = notifier_from_errno(err);
} else {
dsa_master_lag_leave(dev, info->upper_dev);
err = NOTIFY_OK;
}
}
return err;
}
static int dsa_slave_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_PRECHANGEUPPER: {
struct netdev_notifier_changeupper_info *info = ptr;
int err;
err = dsa_slave_prechangeupper_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
err = dsa_master_prechangeupper_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
err = dsa_lag_master_prechangelower_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
err = dsa_bridge_prechangelower_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
err = dsa_slave_prechangeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
err = dsa_slave_lag_prechangeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
break;
}
case NETDEV_CHANGEUPPER: {
int err;
err = dsa_slave_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
err = dsa_slave_lag_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
err = dsa_master_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
break;
}
case NETDEV_CHANGELOWERSTATE: {
struct netdev_notifier_changelowerstate_info *info = ptr;
struct dsa_port *dp;
int err = 0;
if (dsa_slave_dev_check(dev)) {
dp = dsa_slave_to_port(dev);
err = dsa_port_lag_change(dp, info->lower_state_info);
}
/* Mirror LAG port events on DSA masters that are in
* a LAG towards their respective switch CPU ports
*/
if (netdev_uses_dsa(dev)) {
dp = dev->dsa_ptr;
err = dsa_port_lag_change(dp, info->lower_state_info);
}
return notifier_from_errno(err);
}
case NETDEV_CHANGE:
case NETDEV_UP: {
/* Track the state of the master port.
* A DSA driver may require the master port (and indirectly
* the tagger) to be available for some special operations.
*/
if (netdev_uses_dsa(dev)) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->ds->dst;
/* Track when the master port is UP */
dsa_tree_master_oper_state_change(dst, dev,
netif_oper_up(dev));
/* Track when the master port is ready and can accept
* packets.
* The NETDEV_UP event is not enough to flag a port as ready.
* We also have to wait for linkwatch_do_dev to dev_activate
* the device and emit a NETDEV_CHANGE event.
* We consider a master port ready when it has a qdisc
* assigned and that qdisc is not the noop qdisc.
*/
dsa_tree_master_admin_state_change(dst, dev,
!qdisc_tx_is_noop(dev));
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
case NETDEV_GOING_DOWN: {
struct dsa_port *dp, *cpu_dp;
struct dsa_switch_tree *dst;
LIST_HEAD(close_list);
if (!netdev_uses_dsa(dev))
return NOTIFY_DONE;
cpu_dp = dev->dsa_ptr;
dst = cpu_dp->ds->dst;
dsa_tree_master_admin_state_change(dst, dev, false);
list_for_each_entry(dp, &dst->ports, list) {
if (!dsa_port_is_user(dp))
continue;
if (dp->cpu_dp != cpu_dp)
continue;
list_add(&dp->slave->close_list, &close_list);
}
dev_close_many(&close_list, true);
return NOTIFY_OK;
}
default:
break;
}
return NOTIFY_DONE;
}
static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
struct switchdev_notifier_fdb_info info = {};
info.addr = switchdev_work->addr;
info.vid = switchdev_work->vid;
info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
switchdev_work->orig_dev, &info.info, NULL);
}
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
struct dsa_switchdev_event_work *switchdev_work =
container_of(work, struct dsa_switchdev_event_work, work);
const unsigned char *addr = switchdev_work->addr;
struct net_device *dev = switchdev_work->dev;
u16 vid = switchdev_work->vid;
struct dsa_switch *ds;
struct dsa_port *dp;
int err;
dp = dsa_slave_to_port(dev);
ds = dp->ds;
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (switchdev_work->host_addr)
err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
else if (dp->lag)
err = dsa_port_lag_fdb_add(dp, addr, vid);
else
err = dsa_port_fdb_add(dp, addr, vid);
if (err) {
dev_err(ds->dev,
"port %d failed to add %pM vid %d to fdb: %d\n",
dp->index, addr, vid, err);
break;
}
dsa_fdb_offload_notify(switchdev_work);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
if (switchdev_work->host_addr)
err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
else if (dp->lag)
err = dsa_port_lag_fdb_del(dp, addr, vid);
else
err = dsa_port_fdb_del(dp, addr, vid);
if (err) {
dev_err(ds->dev,
"port %d failed to delete %pM vid %d from fdb: %d\n",
dp->index, addr, vid, err);
}
break;
}
kfree(switchdev_work);
}
static bool dsa_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
const struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch_tree *dst = dp->ds->dst;
if (netif_is_bridge_master(foreign_dev))
return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
if (netif_is_bridge_port(foreign_dev))
return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
/* Everything else is foreign */
return true;
}
static int dsa_slave_fdb_event(struct net_device *dev,
struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info)
{
struct dsa_switchdev_event_work *switchdev_work;
struct dsa_port *dp = dsa_slave_to_port(dev);
bool host_addr = fdb_info->is_local;
struct dsa_switch *ds = dp->ds;
if (ctx && ctx != dp)
return 0;
if (!dp->bridge)
return 0;
if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
if (dsa_port_offloads_bridge_port(dp, orig_dev))
return 0;
/* FDB entries learned by the software bridge or by foreign
* bridge ports should be installed as host addresses only if
* the driver requests assisted learning.
*/
if (!ds->assisted_learning_on_cpu_port)
return 0;
}
/* Also treat FDB entries on foreign interfaces bridged with us as host
* addresses.
*/
if (dsa_foreign_dev_check(dev, orig_dev))
host_addr = true;
/* Check early that we're not doing work in vain.
* Host addresses on LAG ports still require regular FDB ops,
* since the CPU port isn't in a LAG.
*/
if (dp->lag && !host_addr) {
if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
return -EOPNOTSUPP;
} else {
if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
return -EOPNOTSUPP;
}
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return -ENOMEM;
netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
orig_dev->name, fdb_info->addr, fdb_info->vid,
host_addr ? " as host address" : "");
INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
switchdev_work->event = event;
switchdev_work->dev = dev;
switchdev_work->orig_dev = orig_dev;
ether_addr_copy(switchdev_work->addr, fdb_info->addr);
switchdev_work->vid = fdb_info->vid;
switchdev_work->host_addr = host_addr;
dsa_schedule_work(&switchdev_work->work);
return 0;
}
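/* Illustrative sketch, not part of the original file: the
 * assisted_learning_on_cpu_port flag tested in dsa_slave_fdb_event() above
 * is opt-in. A driver whose CPU port cannot learn addresses in hardware
 * would set it from its (hypothetical) setup callback, after which
 * software-learned FDB entries are replayed here as host addresses.
 */
#if 0
static int example_learning_setup(struct dsa_switch *ds)
{
	ds->assisted_learning_on_cpu_port = true;

	return 0;
}
#endif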
/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
dsa_slave_dev_check,
dsa_slave_port_attr_set);
return notifier_from_errno(err);
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
dsa_slave_dev_check,
dsa_foreign_dev_check,
dsa_slave_fdb_event);
return notifier_from_errno(err);
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
err = switchdev_handle_port_obj_add_foreign(dev, ptr,
dsa_slave_dev_check,
dsa_foreign_dev_check,
dsa_slave_port_obj_add);
return notifier_from_errno(err);
case SWITCHDEV_PORT_OBJ_DEL:
err = switchdev_handle_port_obj_del_foreign(dev, ptr,
dsa_slave_dev_check,
dsa_foreign_dev_check,
dsa_slave_port_obj_del);
return notifier_from_errno(err);
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
dsa_slave_dev_check,
dsa_slave_port_attr_set);
return notifier_from_errno(err);
}
return NOTIFY_DONE;
}
static struct notifier_block dsa_slave_nb __read_mostly = {
.notifier_call = dsa_slave_netdevice_event,
};
struct notifier_block dsa_slave_switchdev_notifier = {
.notifier_call = dsa_slave_switchdev_event,
};
struct notifier_block dsa_slave_switchdev_blocking_notifier = {
.notifier_call = dsa_slave_switchdev_blocking_event,
};
int dsa_slave_register_notifier(void)
{
struct notifier_block *nb;
int err;
err = register_netdevice_notifier(&dsa_slave_nb);
if (err)
return err;
err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
if (err)
goto err_switchdev_nb;
nb = &dsa_slave_switchdev_blocking_notifier;
err = register_switchdev_blocking_notifier(nb);
if (err)
goto err_switchdev_blocking_nb;
return 0;
err_switchdev_blocking_nb:
unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
unregister_netdevice_notifier(&dsa_slave_nb);
return err;
}
void dsa_slave_unregister_notifier(void)
{
struct notifier_block *nb;
int err;
nb = &dsa_slave_switchdev_blocking_notifier;
err = unregister_switchdev_blocking_notifier(nb);
if (err)
pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
if (err)
pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
err = unregister_netdevice_notifier(&dsa_slave_nb);
if (err)
pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}
| linux-master | net/dsa/slave.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handling of a single switch port
*
* Copyright (c) 2017 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*/
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "dsa.h"
#include "port.h"
#include "slave.h"
#include "switch.h"
#include "tag_8021q.h"
/**
* dsa_port_notify - Notify the switching fabric of changes to a port
* @dp: port on which change occurred
* @e: event, must be of type DSA_NOTIFIER_*
* @v: event-specific value.
*
* Notify all switches in the DSA tree that this port's switch belongs to,
* including this switch itself, of an event. Allows the other switches to
* reconfigure themselves for cross-chip operations. Can also be used to
* reconfigure ports without net_devices (CPU ports, DSA links) whenever
* a user port's state changes.
*/
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
return dsa_tree_notify(dp->ds->dst, e, v);
}
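/* Note: the helpers below follow a common pattern: wrap the arguments in a
 * dsa_notifier_*_info structure and pass it to dsa_port_notify() (or
 * dsa_broadcast() when every tree must see the event), e.g. roughly:
 *
 *	struct dsa_notifier_mtu_info info = { .dp = dp, .mtu = new_mtu };
 *
 *	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
 *
 * The cross-chip notifier handlers then translate the event into driver ops
 * on each switch of the tree.
 */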
static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
{
struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
struct switchdev_notifier_fdb_info info = {
.vid = vid,
};
/* When the port becomes standalone it has already left the bridge.
* Don't notify the bridge in that case.
*/
if (!brport_dev)
return;
call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
brport_dev, &info.info, NULL);
}
static void dsa_port_fast_age(const struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->port_fast_age)
return;
ds->ops->port_fast_age(ds, dp->index);
/* flush all VLANs */
dsa_port_notify_bridge_fdb_flush(dp, 0);
}
static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
{
struct dsa_switch *ds = dp->ds;
int err;
if (!ds->ops->port_vlan_fast_age)
return -EOPNOTSUPP;
err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);
if (!err)
dsa_port_notify_bridge_fdb_flush(dp, vid);
return err;
}
static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
{
DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
int err, vid;
err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
if (err)
return err;
for_each_set_bit(vid, vids, VLAN_N_VID) {
err = dsa_port_vlan_fast_age(dp, vid);
if (err)
return err;
}
return 0;
}
static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
struct switchdev_brport_flags flags = {
.mask = BR_LEARNING,
};
struct dsa_switch *ds = dp->ds;
int err;
if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
return false;
err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
return !err;
}
bool dsa_port_supports_hwtstamp(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct ifreq ifr = {};
int err;
if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
return false;
/* "See through" shim implementations of the "get" method.
* Since we can't cook up a complete ioctl request structure, this will
* fail in copy_to_user() with -EFAULT, which hopefully is enough to
* detect a valid implementation.
*/
err = ds->ops->port_hwtstamp_get(ds, dp->index, &ifr);
return err != -EOPNOTSUPP;
}
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
if (!ds->ops->port_stp_state_set)
return -EOPNOTSUPP;
ds->ops->port_stp_state_set(ds, port, state);
if (!dsa_port_can_configure_learning(dp) ||
(do_fast_age && dp->learning)) {
/* Fast age FDB entries or flush appropriate forwarding database
* for the given port, if we are moving it from Learning or
* Forwarding state, to Disabled or Blocking or Listening state.
* Ports that were standalone before the STP state change don't
* need to fast age the FDB, since address learning is off in
* standalone mode.
*/
if ((dp->stp_state == BR_STATE_LEARNING ||
dp->stp_state == BR_STATE_FORWARDING) &&
(state == BR_STATE_DISABLED ||
state == BR_STATE_BLOCKING ||
state == BR_STATE_LISTENING))
dsa_port_fast_age(dp);
}
dp->stp_state = state;
return 0;
}
static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
bool do_fast_age)
{
struct dsa_switch *ds = dp->ds;
int err;
err = dsa_port_set_state(dp, state, do_fast_age);
if (err && err != -EOPNOTSUPP) {
dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
dp->index, state, ERR_PTR(err));
}
}
int dsa_port_set_mst_state(struct dsa_port *dp,
const struct switchdev_mst_state *state,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
u8 prev_state;
int err;
if (!ds->ops->port_mst_state_set)
return -EOPNOTSUPP;
err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
&prev_state);
if (err)
return err;
err = ds->ops->port_mst_state_set(ds, dp->index, state);
if (err)
return err;
if (!(dp->learning &&
(prev_state == BR_STATE_LEARNING ||
prev_state == BR_STATE_FORWARDING) &&
(state->state == BR_STATE_DISABLED ||
state->state == BR_STATE_BLOCKING ||
state->state == BR_STATE_LISTENING)))
return 0;
err = dsa_port_msti_fast_age(dp, state->msti);
if (err)
NL_SET_ERR_MSG_MOD(extack,
"Unable to flush associated VLANs");
return 0;
}
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
int err;
if (ds->ops->port_enable) {
err = ds->ops->port_enable(ds, port, phy);
if (err)
return err;
}
if (!dp->bridge)
dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);
if (dp->pl)
phylink_start(dp->pl);
return 0;
}
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
int err;
rtnl_lock();
err = dsa_port_enable_rt(dp, phy);
rtnl_unlock();
return err;
}
void dsa_port_disable_rt(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
if (dp->pl)
phylink_stop(dp->pl);
if (!dp->bridge)
dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);
if (ds->ops->port_disable)
ds->ops->port_disable(ds, port);
}
void dsa_port_disable(struct dsa_port *dp)
{
rtnl_lock();
dsa_port_disable_rt(dp);
rtnl_unlock();
}
static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
struct dsa_bridge bridge)
{
struct netlink_ext_ack extack = {0};
bool change_vlan_filtering = false;
struct dsa_switch *ds = dp->ds;
struct dsa_port *other_dp;
bool vlan_filtering;
int err;
if (ds->needs_standalone_vlan_filtering &&
!br_vlan_enabled(bridge.dev)) {
change_vlan_filtering = true;
vlan_filtering = true;
} else if (!ds->needs_standalone_vlan_filtering &&
br_vlan_enabled(bridge.dev)) {
change_vlan_filtering = true;
vlan_filtering = false;
}
/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
* event for changing the vlan_filtering setting upon slave ports leaving
* it. That is a good thing, because it lets us handle the reset here and
* also cover the case where the switch's vlan_filtering setting is global
* (not per port). In that case, the correct moment to trigger the
* vlan_filtering callback is only when the last port leaves the last
* VLAN-aware bridge.
*/
if (change_vlan_filtering && ds->vlan_filtering_is_global) {
dsa_switch_for_each_port(other_dp, ds) {
struct net_device *br = dsa_port_bridge_dev_get(other_dp);
if (br && br_vlan_enabled(br)) {
change_vlan_filtering = false;
break;
}
}
}
if (!change_vlan_filtering)
return;
err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
if (extack._msg) {
dev_err(ds->dev, "port %d: %s\n", dp->index,
extack._msg);
}
if (err && err != -EOPNOTSUPP) {
dev_err(ds->dev,
"port %d failed to reset VLAN filtering to %d: %pe\n",
dp->index, vlan_filtering, ERR_PTR(err));
}
}
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
BR_BCAST_FLOOD | BR_PORT_LOCKED;
struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
int flag, err;
for_each_set_bit(flag, &mask, 32) {
struct switchdev_brport_flags flags = {0};
flags.mask = BIT(flag);
if (br_port_flag_is_set(brport_dev, BIT(flag)))
flags.val = BIT(flag);
err = dsa_port_bridge_flags(dp, flags, extack);
if (err && err != -EOPNOTSUPP)
return err;
}
return 0;
}
static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
BR_BCAST_FLOOD | BR_PORT_LOCKED;
int flag, err;
for_each_set_bit(flag, &mask, 32) {
struct switchdev_brport_flags flags = {0};
flags.mask = BIT(flag);
flags.val = val & BIT(flag);
err = dsa_port_bridge_flags(dp, flags, NULL);
if (err && err != -EOPNOTSUPP)
dev_err(dp->ds->dev,
"failed to clear bridge port flag %lu: %pe\n",
flags.val, ERR_PTR(err));
}
}
static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
struct net_device *br = dsa_port_bridge_dev_get(dp);
int err;
err = dsa_port_inherit_brport_flags(dp, extack);
if (err)
return err;
err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
if (err && err != -EOPNOTSUPP)
return err;
err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
if (err && err != -EOPNOTSUPP)
return err;
err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
if (err && err != -EOPNOTSUPP)
return err;
return 0;
}
static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
struct dsa_bridge bridge)
{
/* Configure the port for standalone mode (no address learning,
* flood everything).
* The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
* when the user requests it through netlink or sysfs, but not
* automatically at port join or leave, so we need to handle resetting
* the brport flags ourselves. But we even prefer it that way, because
* otherwise, some setups might never get the notification they need,
* for example, when a port leaves a LAG that offloads the bridge,
* it becomes standalone, but as far as the bridge is concerned, no
* port ever left.
*/
dsa_port_clear_brport_flags(dp);
/* The port has left the bridge and was put in BR_STATE_DISABLED by the
* bridge layer, so move it back to BR_STATE_FORWARDING to keep it
* functional as a standalone port.
*/
dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
dsa_port_reset_vlan_filtering(dp, bridge);
/* Ageing time may be global to the switch chip, so don't change it
* here because we have no good reason (or value) to change it to.
*/
}
static int dsa_port_bridge_create(struct dsa_port *dp,
struct net_device *br,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
struct dsa_bridge *bridge;
bridge = dsa_tree_bridge_find(ds->dst, br);
if (bridge) {
refcount_inc(&bridge->refcount);
dp->bridge = bridge;
return 0;
}
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
refcount_set(&bridge->refcount, 1);
bridge->dev = br;
bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
if (ds->max_num_bridges && !bridge->num) {
NL_SET_ERR_MSG_MOD(extack,
"Range of offloadable bridges exceeded");
kfree(bridge);
return -EOPNOTSUPP;
}
dp->bridge = bridge;
return 0;
}
static void dsa_port_bridge_destroy(struct dsa_port *dp,
const struct net_device *br)
{
struct dsa_bridge *bridge = dp->bridge;
dp->bridge = NULL;
if (!refcount_dec_and_test(&bridge->refcount))
return;
if (bridge->num)
dsa_bridge_num_put(br, bridge->num);
kfree(bridge);
}
static bool dsa_port_supports_mst(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
return ds->ops->vlan_msti_set &&
ds->ops->port_mst_state_set &&
ds->ops->port_vlan_fast_age &&
dsa_port_can_configure_learning(dp);
}
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_bridge_info info = {
.dp = dp,
.extack = extack,
};
struct net_device *dev = dp->slave;
struct net_device *brport_dev;
int err;
if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
return -EOPNOTSUPP;
/* Here the interface is already bridged. Reflect the current
* configuration so that drivers can program their chips accordingly.
*/
err = dsa_port_bridge_create(dp, br, extack);
if (err)
return err;
brport_dev = dsa_port_to_bridge_port(dp);
info.bridge = *dp->bridge;
err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
if (err)
goto out_rollback;
/* Drivers which support bridge TX forwarding should set this */
dp->bridge->tx_fwd_offload = info.tx_fwd_offload;
err = switchdev_bridge_port_offload(brport_dev, dev, dp,
&dsa_slave_switchdev_notifier,
&dsa_slave_switchdev_blocking_notifier,
dp->bridge->tx_fwd_offload, extack);
if (err)
goto out_rollback_unbridge;
err = dsa_port_switchdev_sync_attrs(dp, extack);
if (err)
goto out_rollback_unoffload;
return 0;
out_rollback_unoffload:
switchdev_bridge_port_unoffload(brport_dev, dp,
&dsa_slave_switchdev_notifier,
&dsa_slave_switchdev_blocking_notifier);
dsa_flush_workqueue();
out_rollback_unbridge:
dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
dsa_port_bridge_destroy(dp, br);
return err;
}
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
/* Don't try to unoffload something that is not offloaded */
if (!brport_dev)
return;
switchdev_bridge_port_unoffload(brport_dev, dp,
&dsa_slave_switchdev_notifier,
&dsa_slave_switchdev_blocking_notifier);
dsa_flush_workqueue();
}
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
struct dsa_notifier_bridge_info info = {
.dp = dp,
};
int err;
/* If the port could not be offloaded to begin with, then
* there is nothing to do.
*/
if (!dp->bridge)
return;
info.bridge = *dp->bridge;
/* Here the port is already unbridged. Reflect the current configuration
* so that drivers can program their chips accordingly.
*/
dsa_port_bridge_destroy(dp, br);
err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
if (err)
dev_err(dp->ds->dev,
"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}
int dsa_port_lag_change(struct dsa_port *dp,
struct netdev_lag_lower_state_info *linfo)
{
struct dsa_notifier_lag_info info = {
.dp = dp,
};
bool tx_enabled;
if (!dp->lag)
return 0;
/* On statically configured aggregates (e.g. loadbalance
* without LACP) ports will always be tx_enabled, even if the
* link is down. Thus we require both link_up and tx_enabled
* in order to include it in the tx set.
*/
tx_enabled = linfo->link_up && linfo->tx_enabled;
if (tx_enabled == dp->lag_tx_enabled)
return 0;
dp->lag_tx_enabled = tx_enabled;
return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
static int dsa_port_lag_create(struct dsa_port *dp,
struct net_device *lag_dev)
{
struct dsa_switch *ds = dp->ds;
struct dsa_lag *lag;
lag = dsa_tree_lag_find(ds->dst, lag_dev);
if (lag) {
refcount_inc(&lag->refcount);
dp->lag = lag;
return 0;
}
lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!lag)
return -ENOMEM;
refcount_set(&lag->refcount, 1);
mutex_init(&lag->fdb_lock);
INIT_LIST_HEAD(&lag->fdbs);
lag->dev = lag_dev;
dsa_lag_map(ds->dst, lag);
dp->lag = lag;
return 0;
}
static void dsa_port_lag_destroy(struct dsa_port *dp)
{
struct dsa_lag *lag = dp->lag;
dp->lag = NULL;
dp->lag_tx_enabled = false;
if (!refcount_dec_and_test(&lag->refcount))
return;
WARN_ON(!list_empty(&lag->fdbs));
dsa_lag_unmap(dp->ds->dst, lag);
kfree(lag);
}
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_lag_info info = {
.dp = dp,
.info = uinfo,
.extack = extack,
};
struct net_device *bridge_dev;
int err;
err = dsa_port_lag_create(dp, lag_dev);
if (err)
goto err_lag_create;
info.lag = *dp->lag;
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
if (err)
goto err_lag_join;
bridge_dev = netdev_master_upper_dev_get(lag_dev);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
return 0;
err = dsa_port_bridge_join(dp, bridge_dev, extack);
if (err)
goto err_bridge_join;
return 0;
err_bridge_join:
dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
dsa_port_lag_destroy(dp);
err_lag_create:
return err;
}
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
struct net_device *br = dsa_port_bridge_dev_get(dp);
if (br)
dsa_port_pre_bridge_leave(dp, br);
}
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_notifier_lag_info info = {
.dp = dp,
};
int err;
if (!dp->lag)
return;
/* Port might have been part of a LAG that in turn was
* attached to a bridge.
*/
if (br)
dsa_port_bridge_leave(dp, br);
info.lag = *dp->lag;
dsa_port_lag_destroy(dp);
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
if (err)
dev_err(dp->ds->dev,
"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
}
/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
struct dsa_port *other_dp;
int err;
/* VLAN awareness was off, so the question is "can we turn it on".
* We may have had 8021q uppers, those need to go. Make sure we don't
* enter an inconsistent state: deny changing the VLAN awareness state
* as long as we have 8021q uppers.
*/
if (vlan_filtering && dsa_port_is_user(dp)) {
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct net_device *upper_dev, *slave = dp->slave;
struct list_head *iter;
netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
struct bridge_vlan_info br_info;
u16 vid;
if (!is_vlan_dev(upper_dev))
continue;
vid = vlan_dev_vlan_id(upper_dev);
/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
* device, respectively the VID, is not found. A return
* value of 0 means the VID exists in the bridge, which
* is a failure for us here.
*/
err = br_vlan_get_info(br, vid, &br_info);
if (err == 0) {
NL_SET_ERR_MSG_MOD(extack,
"Must first remove VLAN uppers having VIDs also present in bridge");
return false;
}
}
}
if (!ds->vlan_filtering_is_global)
return true;
/* For cases where enabling/disabling VLAN awareness is global to the
* switch, we need to handle the case where multiple bridges span
* different ports of the same switch device and one of them has a
* different setting than what is being requested.
*/
dsa_switch_for_each_port(other_dp, ds) {
struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);
/* If it's the same bridge, it also has same
* vlan_filtering setting => no need to check
*/
if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
continue;
if (br_vlan_enabled(other_br) != vlan_filtering) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN filtering is a global setting");
return false;
}
}
return true;
}
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
struct dsa_switch *ds = dp->ds;
bool apply;
int err;
if (!ds->ops->port_vlan_filtering)
return -EOPNOTSUPP;
/* We are called from dsa_slave_switchdev_blocking_event(),
* which is not under rcu_read_lock(), unlike
* dsa_slave_switchdev_event().
*/
rcu_read_lock();
apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
rcu_read_unlock();
if (!apply)
return -EINVAL;
if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
return 0;
err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
extack);
if (err)
return err;
if (ds->vlan_filtering_is_global) {
struct dsa_port *other_dp;
ds->vlan_filtering = vlan_filtering;
dsa_switch_for_each_user_port(other_dp, ds) {
struct net_device *slave = other_dp->slave;
/* We might be called in the unbind path, so not
* all slave devices might still be registered.
*/
if (!slave)
continue;
err = dsa_slave_manage_vlan_filtering(slave,
vlan_filtering);
if (err)
goto restore;
}
} else {
dp->vlan_filtering = vlan_filtering;
err = dsa_slave_manage_vlan_filtering(dp->slave,
vlan_filtering);
if (err)
goto restore;
}
return 0;
restore:
ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);
if (ds->vlan_filtering_is_global)
ds->vlan_filtering = old_vlan_filtering;
else
dp->vlan_filtering = old_vlan_filtering;
return err;
}
/* This enforces legacy behavior for switch drivers which assume they can't
* receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
*/
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_switch *ds = dp->ds;
if (!br)
return false;
return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}
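/* Note: the bridge VLAN add and del paths in slave.c consult this helper
 * before notifying the switch drivers, roughly:
 *
 *	if (dsa_port_skip_vlan_configuration(dp)) {
 *		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
 *		return 0;
 *	}
 *
 * so legacy drivers never see VLAN objects while the bridge is VLAN-unaware.
 */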
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
struct dsa_notifier_ageing_time_info info;
int err;
info.ageing_time = ageing_time;
err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
if (err)
return err;
dp->ageing_time = ageing_time;
return 0;
}
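/* Worked example, assuming the default USER_HZ of 100: the default bridge
 * ageing time of 300 seconds arrives from switchdev as a clock_t of 30000;
 * clock_t_to_jiffies() turns that into 300 * HZ jiffies, and
 * jiffies_to_msecs() into 300000 ms, which is the value that drivers receive
 * through the DSA_NOTIFIER_AGEING_TIME event.
 */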
int dsa_port_mst_enable(struct dsa_port *dp, bool on,
struct netlink_ext_ack *extack)
{
if (on && !dsa_port_supports_mst(dp)) {
NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
return -EINVAL;
}
return 0;
}
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->port_pre_bridge_flags)
return -EINVAL;
return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}
int dsa_port_bridge_flags(struct dsa_port *dp,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
int err;
if (!ds->ops->port_bridge_flags)
return -EOPNOTSUPP;
err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
if (err)
return err;
if (flags.mask & BR_LEARNING) {
bool learning = flags.val & BR_LEARNING;
if (learning == dp->learning)
return 0;
if ((dp->learning && !learning) &&
(dp->stp_state == BR_STATE_LEARNING ||
dp->stp_state == BR_STATE_FORWARDING))
dsa_port_fast_age(dp);
dp->learning = learning;
}
return 0;
}
void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
struct dsa_switch *ds = dp->ds;
if (ds->ops->port_set_host_flood)
ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}
int dsa_port_vlan_msti(struct dsa_port *dp,
const struct switchdev_vlan_msti *msti)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->vlan_msti_set)
return -EOPNOTSUPP;
return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
}
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
struct dsa_notifier_mtu_info info = {
.dp = dp,
.mtu = new_mtu,
};
return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
struct dsa_notifier_fdb_info info = {
.dp = dp,
.addr = addr,
.vid = vid,
.db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
},
};
/* Refcounting takes bridge.num as a key, and should be global for all
* bridges in the absence of FDB isolation, and per bridge otherwise.
* Force the bridge.num to zero here in the absence of FDB isolation.
*/
if (!dp->ds->fdb_isolation)
info.db.bridge.num = 0;
return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
struct dsa_notifier_fdb_info info = {
.dp = dp,
.addr = addr,
.vid = vid,
.db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
},
};
if (!dp->ds->fdb_isolation)
info.db.bridge.num = 0;
return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}
static int dsa_port_host_fdb_add(struct dsa_port *dp,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct dsa_notifier_fdb_info info = {
.dp = dp,
.addr = addr,
.vid = vid,
.db = db,
};
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}
int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
const unsigned char *addr, u16 vid)
{
struct dsa_db db = {
.type = DSA_DB_PORT,
.dp = dp,
};
return dsa_port_host_fdb_add(dp, addr, vid, db);
}
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
const unsigned char *addr, u16 vid)
{
struct net_device *master = dsa_port_to_master(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
};
int err;
if (!dp->ds->fdb_isolation)
db.bridge.num = 0;
/* Avoid a call to __dev_set_promiscuity() on the master, which
* requires rtnl_lock(), since we can't guarantee that is held here,
* and we can't take it either.
*/
if (master->priv_flags & IFF_UNICAST_FLT) {
err = dev_uc_add(master, addr);
if (err)
return err;
}
return dsa_port_host_fdb_add(dp, addr, vid, db);
}
static int dsa_port_host_fdb_del(struct dsa_port *dp,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct dsa_notifier_fdb_info info = {
.dp = dp,
.addr = addr,
.vid = vid,
.db = db,
};
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}
int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
const unsigned char *addr, u16 vid)
{
struct dsa_db db = {
.type = DSA_DB_PORT,
.dp = dp,
};
return dsa_port_host_fdb_del(dp, addr, vid, db);
}
int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
const unsigned char *addr, u16 vid)
{
struct net_device *master = dsa_port_to_master(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
};
int err;
if (!dp->ds->fdb_isolation)
db.bridge.num = 0;
if (master->priv_flags & IFF_UNICAST_FLT) {
err = dev_uc_del(master, addr);
if (err)
return err;
}
return dsa_port_host_fdb_del(dp, addr, vid, db);
}
int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
struct dsa_notifier_lag_fdb_info info = {
.lag = dp->lag,
.addr = addr,
.vid = vid,
.db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
},
};
if (!dp->ds->fdb_isolation)
info.db.bridge.num = 0;
return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
}
int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
struct dsa_notifier_lag_fdb_info info = {
.lag = dp->lag,
.addr = addr,
.vid = vid,
.db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
},
};
if (!dp->ds->fdb_isolation)
info.db.bridge.num = 0;
return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
if (!ds->ops->port_fdb_dump)
return -EOPNOTSUPP;
return ds->ops->port_fdb_dump(ds, port, cb, data);
}
int dsa_port_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_notifier_mdb_info info = {
.dp = dp,
.mdb = mdb,
.db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
},
};
if (!dp->ds->fdb_isolation)
info.db.bridge.num = 0;
return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}
int dsa_port_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_notifier_mdb_info info = {
.dp = dp,
.mdb = mdb,
.db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
},
};
if (!dp->ds->fdb_isolation)
info.db.bridge.num = 0;
return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}
static int dsa_port_host_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct dsa_notifier_mdb_info info = {
.dp = dp,
.mdb = mdb,
.db = db,
};
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}
int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_db db = {
.type = DSA_DB_PORT,
.dp = dp,
};
return dsa_port_host_mdb_add(dp, mdb, db);
}
int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct net_device *master = dsa_port_to_master(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
};
int err;
if (!dp->ds->fdb_isolation)
db.bridge.num = 0;
err = dev_mc_add(master, mdb->addr);
if (err)
return err;
return dsa_port_host_mdb_add(dp, mdb, db);
}
static int dsa_port_host_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct dsa_notifier_mdb_info info = {
.dp = dp,
.mdb = mdb,
.db = db,
};
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}
int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_db db = {
.type = DSA_DB_PORT,
.dp = dp,
};
return dsa_port_host_mdb_del(dp, mdb, db);
}
int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct net_device *master = dsa_port_to_master(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
};
int err;
if (!dp->ds->fdb_isolation)
db.bridge.num = 0;
err = dev_mc_del(master, mdb->addr);
if (err)
return err;
return dsa_port_host_mdb_del(dp, mdb, db);
}
int dsa_port_vlan_add(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_vlan_info info = {
.dp = dp,
.vlan = vlan,
.extack = extack,
};
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}
int dsa_port_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_notifier_vlan_info info = {
.dp = dp,
.vlan = vlan,
};
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}
int dsa_port_host_vlan_add(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct net_device *master = dsa_port_to_master(dp);
struct dsa_notifier_vlan_info info = {
.dp = dp,
.vlan = vlan,
.extack = extack,
};
int err;
err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
if (err && err != -EOPNOTSUPP)
return err;
vlan_vid_add(master, htons(ETH_P_8021Q), vlan->vid);
return err;
}
int dsa_port_host_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan)
{
struct net_device *master = dsa_port_to_master(dp);
struct dsa_notifier_vlan_info info = {
.dp = dp,
.vlan = vlan,
};
int err;
err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
if (err && err != -EOPNOTSUPP)
return err;
vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
return err;
}
int dsa_port_mrp_add(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->port_mrp_add)
return -EOPNOTSUPP;
return ds->ops->port_mrp_add(ds, dp->index, mrp);
}
int dsa_port_mrp_del(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->port_mrp_del)
return -EOPNOTSUPP;
return ds->ops->port_mrp_del(ds, dp->index, mrp);
}
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->port_mrp_add_ring_role)
return -EOPNOTSUPP;
return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->port_mrp_del_ring_role)
return -EOPNOTSUPP;
return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}
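/* Illustrative sketch only, not taken from any in-tree driver (the foo_
 * names are hypothetical): a switch that must trap MRP_Test frames to the
 * CPU while the node acts as ring manager could wire up the ring role hook
 * roughly like this.
 */
#if 0
static int foo_port_mrp_add_ring_role(struct dsa_switch *ds, int port,
				      const struct switchdev_obj_ring_role_mrp *mrp)
{
	/* Clients (MRC) forward MRP_Test frames in hardware; only the
	 * manager roles need them delivered to the CPU.
	 */
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC)
		return 0;

	return foo_trap_mrp_test_frames(ds->priv, port, mrp->ring_id);
}
#endif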
static int dsa_port_assign_master(struct dsa_port *dp,
struct net_device *master,
struct netlink_ext_ack *extack,
bool fail_on_err)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index, err;
err = ds->ops->port_change_master(ds, port, master, extack);
if (err && !fail_on_err)
dev_err(ds->dev, "port %d failed to assign master %s: %pe\n",
port, master->name, ERR_PTR(err));
if (err && fail_on_err)
return err;
dp->cpu_dp = master->dsa_ptr;
dp->cpu_port_in_lag = netif_is_lag_master(master);
return 0;
}
/* Change the dp->cpu_dp affinity for a user port. Note that both cross-chip
* notifiers and drivers have implicit assumptions about user-to-CPU-port
* mappings, so we unfortunately cannot delay the deletion of the objects
* (switchdev, standalone addresses, standalone VLANs) on the old CPU port
* until the new CPU port has been set up. So we need to completely tear down
* the old CPU port before changing it, and restore it on errors during the
* bringup of the new one.
*/
int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
struct netlink_ext_ack *extack)
{
struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
struct net_device *old_master = dsa_port_to_master(dp);
struct net_device *dev = dp->slave;
struct dsa_switch *ds = dp->ds;
bool vlan_filtering;
int err, tmp;
/* Bridges may hold host FDB, MDB and VLAN objects. These need to be
* migrated, so dynamically unoffload and later reoffload the bridge
* port.
*/
if (bridge_dev) {
dsa_port_pre_bridge_leave(dp, bridge_dev);
dsa_port_bridge_leave(dp, bridge_dev);
}
/* The port might still be VLAN filtering even if it's no longer
* under a bridge, either due to ds->vlan_filtering_is_global or
* ds->needs_standalone_vlan_filtering. In turn this means there are
* standalone VLANs installed on the CPU port, which need to be
* removed from the old CPU port and restored on the new one.
*/
vlan_filtering = dsa_port_is_vlan_filtering(dp);
if (vlan_filtering) {
err = dsa_slave_manage_vlan_filtering(dev, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to remove standalone VLANs");
goto rewind_old_bridge;
}
}
/* Standalone addresses, and addresses of upper interfaces like
* VLAN, LAG, HSR need to be migrated.
*/
dsa_slave_unsync_ha(dev);
err = dsa_port_assign_master(dp, master, extack, true);
if (err)
goto rewind_old_addrs;
dsa_slave_sync_ha(dev);
if (vlan_filtering) {
err = dsa_slave_manage_vlan_filtering(dev, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to restore standalone VLANs");
goto rewind_new_addrs;
}
}
if (bridge_dev) {
err = dsa_port_bridge_join(dp, bridge_dev, extack);
if (err && err == -EOPNOTSUPP) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to reoffload bridge");
goto rewind_new_vlan;
}
}
return 0;
rewind_new_vlan:
if (vlan_filtering)
dsa_slave_manage_vlan_filtering(dev, false);
rewind_new_addrs:
dsa_slave_unsync_ha(dev);
dsa_port_assign_master(dp, old_master, NULL, false);
/* Restore the objects on the old CPU port */
rewind_old_addrs:
dsa_slave_sync_ha(dev);
if (vlan_filtering) {
tmp = dsa_slave_manage_vlan_filtering(dev, true);
if (tmp) {
dev_err(ds->dev,
"port %d failed to restore standalone VLANs: %pe\n",
dp->index, ERR_PTR(tmp));
}
}
rewind_old_bridge:
if (bridge_dev) {
tmp = dsa_port_bridge_join(dp, bridge_dev, extack);
if (tmp) {
dev_err(ds->dev,
"port %d failed to rejoin bridge %s: %pe\n",
dp->index, bridge_dev->name, ERR_PTR(tmp));
}
}
return err;
}
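/* To summarize the sequence above: tear down state on the old CPU port
 * (unoffload the bridge, remove standalone VLANs, unsync host addresses),
 * switch the affinity through ->port_change_master(), then rebuild it on the
 * new CPU port (resync host addresses, restore standalone VLANs, reoffload
 * the bridge), with each rewind label undoing the corresponding step on
 * failure.
 */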
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
const struct dsa_device_ops *tag_ops)
{
cpu_dp->rcv = tag_ops->rcv;
cpu_dp->tag_ops = tag_ops;
}
static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
struct device_node *phy_dn;
struct phy_device *phydev;
phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
if (!phy_dn)
return NULL;
phydev = of_phy_find_device(phy_dn);
if (!phydev) {
of_node_put(phy_dn);
return ERR_PTR(-EPROBE_DEFER);
}
of_node_put(phy_dn);
return phydev;
}
static void dsa_port_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
/* Skip call for drivers which don't yet set mac_capabilities,
* since validating in that case would mean their PHY will advertise
* nothing. In turn, skipping validation makes them advertise
* everything that the PHY supports, so those drivers should be
* converted ASAP.
*/
if (config->mac_capabilities)
phylink_generic_validate(config, supported, state);
}
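/* Illustrative sketch only, not taken from any in-tree driver (the foo_ name
 * is hypothetical): a driver opts in to generic validation by filling in
 * mac_capabilities and the supported interfaces from its ->phylink_get_caps()
 * hook, roughly like this.
 */
#if 0
static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
				 struct phylink_config *config)
{
	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;

	__set_bit(PHY_INTERFACE_MODE_INTERNAL,
		  config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII,
		  config->supported_interfaces);
}
#endif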
static struct phylink_pcs *
dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
struct dsa_switch *ds = dp->ds;
if (ds->ops->phylink_mac_select_pcs)
pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);
return pcs;
}
static int dsa_port_phylink_mac_prepare(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
int err = 0;
if (ds->ops->phylink_mac_prepare)
err = ds->ops->phylink_mac_prepare(ds, dp->index, mode,
interface);
return err;
}
static void dsa_port_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->phylink_mac_config)
return;
ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}
static int dsa_port_phylink_mac_finish(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
int err = 0;
if (ds->ops->phylink_mac_finish)
err = ds->ops->phylink_mac_finish(ds, dp->index, mode,
interface);
return err;
}
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct phy_device *phydev = NULL;
struct dsa_switch *ds = dp->ds;
if (dsa_port_is_user(dp))
phydev = dp->slave->phydev;
if (!ds->ops->phylink_mac_link_down) {
if (ds->ops->adjust_link && phydev)
ds->ops->adjust_link(ds, dp->index, phydev);
return;
}
ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->phylink_mac_link_up) {
if (ds->ops->adjust_link && phydev)
ds->ops->adjust_link(ds, dp->index, phydev);
return;
}
ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
speed, duplex, tx_pause, rx_pause);
}
static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
.validate = dsa_port_phylink_validate,
.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
.mac_prepare = dsa_port_phylink_mac_prepare,
.mac_config = dsa_port_phylink_mac_config,
.mac_finish = dsa_port_phylink_mac_finish,
.mac_link_down = dsa_port_phylink_mac_link_down,
.mac_link_up = dsa_port_phylink_mac_link_up,
};
int dsa_port_phylink_create(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
phy_interface_t mode;
struct phylink *pl;
int err;
err = of_get_phy_mode(dp->dn, &mode);
if (err)
mode = PHY_INTERFACE_MODE_NA;
if (ds->ops->phylink_get_caps) {
ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
} else {
/* For legacy drivers */
if (mode != PHY_INTERFACE_MODE_NA) {
__set_bit(mode, dp->pl_config.supported_interfaces);
} else {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
dp->pl_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII,
dp->pl_config.supported_interfaces);
}
}
pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
mode, &dsa_port_phylink_mac_ops);
if (IS_ERR(pl)) {
pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
return PTR_ERR(pl);
}
dp->pl = pl;
return 0;
}
void dsa_port_phylink_destroy(struct dsa_port *dp)
{
phylink_destroy(dp->pl);
dp->pl = NULL;
}
static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
struct dsa_switch *ds = dp->ds;
struct phy_device *phydev;
int port = dp->index;
int err = 0;
phydev = dsa_port_get_phy_device(dp);
if (!phydev)
return 0;
if (IS_ERR(phydev))
return PTR_ERR(phydev);
if (enable) {
err = genphy_resume(phydev);
if (err < 0)
goto err_put_dev;
err = genphy_read_status(phydev);
if (err < 0)
goto err_put_dev;
} else {
err = genphy_suspend(phydev);
if (err < 0)
goto err_put_dev;
}
if (ds->ops->adjust_link)
ds->ops->adjust_link(ds, port, phydev);
dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
err_put_dev:
put_device(&phydev->mdio.dev);
return err;
}
static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp)
{
struct device_node *dn = dp->dn;
struct dsa_switch *ds = dp->ds;
struct phy_device *phydev;
int port = dp->index;
phy_interface_t mode;
int err;
err = of_phy_register_fixed_link(dn);
if (err) {
dev_err(ds->dev,
"failed to register the fixed PHY of port %d\n",
port);
return err;
}
phydev = of_phy_find_device(dn);
err = of_get_phy_mode(dn, &mode);
if (err)
mode = PHY_INTERFACE_MODE_NA;
phydev->interface = mode;
genphy_read_status(phydev);
if (ds->ops->adjust_link)
ds->ops->adjust_link(ds, port, phydev);
put_device(&phydev->mdio.dev);
return 0;
}
static int dsa_shared_port_phylink_register(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct device_node *port_dn = dp->dn;
int err;
dp->pl_config.dev = ds->dev;
dp->pl_config.type = PHYLINK_DEV;
err = dsa_port_phylink_create(dp);
if (err)
return err;
err = phylink_of_phy_connect(dp->pl, port_dn, 0);
if (err && err != -ENODEV) {
pr_err("could not attach to PHY: %d\n", err);
goto err_phy_connect;
}
return 0;
err_phy_connect:
dsa_port_phylink_destroy(dp);
return err;
}
/* During the initial DSA driver migration to OF, port nodes were sometimes
* added to device trees with no indication of how they should operate from a
* link management perspective (phy-handle, fixed-link, etc). Additionally, the
* phy-mode may be absent. The interpretation of these port OF nodes depends on
* their type.
*
* User ports with no phy-handle or fixed-link are expected to connect to an
* internal PHY located on the ds->slave_mii_bus at an MDIO address equal to
* the port number. This description is still actively supported.
*
* Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to
* operate at the maximum speed that their phy-mode is capable of. If the
* phy-mode is absent, they are expected to operate using the phy-mode
* supported by the port that gives the highest link speed. It is unspecified
* if the port should use flow control or not, half duplex or full duplex, or
* if the phy-mode is a SERDES link, whether in-band autoneg is expected to be
* enabled or not.
*
* In the latter case of shared ports, omitting the link management description
* from the firmware node is deprecated and strongly discouraged. DSA uses
* phylink, which rejects the firmware nodes of these ports for lacking
* required properties.
*
* For switches in this table, DSA will skip enforcing validation and will
* later omit registering a phylink instance for the shared ports, if they lack
* a fixed-link, a phy-handle, or a managed = "in-band-status" property.
* It becomes the responsibility of the driver to ensure that these ports
* operate at the maximum speed (whatever this means) and will interoperate
* with the DSA master or other cascade port, since phylink methods will not be
* invoked for them.
*
* If you are considering expanding this table for newly introduced switches,
* think again. It is OK to remove switches from this table if there aren't DT
* blobs in circulation which rely on defaulting the shared ports.
*/
static const char * const dsa_switches_apply_workarounds[] = {
#if IS_ENABLED(CONFIG_NET_DSA_XRS700X)
"arrow,xrs7003e",
"arrow,xrs7003f",
"arrow,xrs7004e",
"arrow,xrs7004f",
#endif
#if IS_ENABLED(CONFIG_B53)
"brcm,bcm5325",
"brcm,bcm53115",
"brcm,bcm53125",
"brcm,bcm53128",
"brcm,bcm5365",
"brcm,bcm5389",
"brcm,bcm5395",
"brcm,bcm5397",
"brcm,bcm5398",
"brcm,bcm53010-srab",
"brcm,bcm53011-srab",
"brcm,bcm53012-srab",
"brcm,bcm53018-srab",
"brcm,bcm53019-srab",
"brcm,bcm5301x-srab",
"brcm,bcm11360-srab",
"brcm,bcm58522-srab",
"brcm,bcm58525-srab",
"brcm,bcm58535-srab",
"brcm,bcm58622-srab",
"brcm,bcm58623-srab",
"brcm,bcm58625-srab",
"brcm,bcm88312-srab",
"brcm,cygnus-srab",
"brcm,nsp-srab",
"brcm,omega-srab",
"brcm,bcm3384-switch",
"brcm,bcm6328-switch",
"brcm,bcm6368-switch",
"brcm,bcm63xx-switch",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_BCM_SF2)
"brcm,bcm7445-switch-v4.0",
"brcm,bcm7278-switch-v4.0",
"brcm,bcm7278-switch-v4.8",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_LANTIQ_GSWIP)
"lantiq,xrx200-gswip",
"lantiq,xrx300-gswip",
"lantiq,xrx330-gswip",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6060)
"marvell,mv88e6060",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6XXX)
"marvell,mv88e6085",
"marvell,mv88e6190",
"marvell,mv88e6250",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON)
"microchip,ksz8765",
"microchip,ksz8794",
"microchip,ksz8795",
"microchip,ksz8863",
"microchip,ksz8873",
"microchip,ksz9477",
"microchip,ksz9897",
"microchip,ksz9893",
"microchip,ksz9563",
"microchip,ksz8563",
"microchip,ksz9567",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_MDIO)
"smsc,lan9303-mdio",
#endif
#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_I2C)
"smsc,lan9303-i2c",
#endif
NULL,
};
static void dsa_shared_port_validate_of(struct dsa_port *dp,
bool *missing_phy_mode,
bool *missing_link_description)
{
struct device_node *dn = dp->dn, *phy_np;
struct dsa_switch *ds = dp->ds;
phy_interface_t mode;
*missing_phy_mode = false;
*missing_link_description = false;
if (of_get_phy_mode(dn, &mode)) {
*missing_phy_mode = true;
dev_err(ds->dev,
"OF node %pOF of %s port %d lacks the required \"phy-mode\" property\n",
dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
}
/* Note: of_phy_is_fixed_link() also returns true for
* managed = "in-band-status"
*/
if (of_phy_is_fixed_link(dn))
return;
phy_np = of_parse_phandle(dn, "phy-handle", 0);
if (phy_np) {
of_node_put(phy_np);
return;
}
*missing_link_description = true;
dev_err(ds->dev,
"OF node %pOF of %s port %d lacks the required \"phy-handle\", \"fixed-link\" or \"managed\" properties\n",
dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
}
int dsa_shared_port_link_register_of(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
bool missing_link_description;
bool missing_phy_mode;
int port = dp->index;
dsa_shared_port_validate_of(dp, &missing_phy_mode,
&missing_link_description);
if ((missing_phy_mode || missing_link_description) &&
!of_device_compatible_match(ds->dev->of_node,
dsa_switches_apply_workarounds))
return -EINVAL;
if (!ds->ops->adjust_link) {
if (missing_link_description) {
dev_warn(ds->dev,
"Skipping phylink registration for %s port %d\n",
dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
} else {
if (ds->ops->phylink_mac_link_down)
ds->ops->phylink_mac_link_down(ds, port,
MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
return dsa_shared_port_phylink_register(dp);
}
return 0;
}
dev_warn(ds->dev,
"Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
if (of_phy_is_fixed_link(dp->dn))
return dsa_shared_port_fixed_link_register_of(dp);
else
return dsa_shared_port_setup_phy_of(dp, true);
}
void dsa_shared_port_link_unregister_of(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->adjust_link && dp->pl) {
rtnl_lock();
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
dsa_port_phylink_destroy(dp);
return;
}
if (of_phy_is_fixed_link(dp->dn))
of_phy_deregister_fixed_link(dp->dn);
else
dsa_shared_port_setup_phy_of(dp, false);
}
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
struct dsa_switch *ds = dp->ds;
int err;
if (!ds->ops->port_hsr_join)
return -EOPNOTSUPP;
dp->hsr_dev = hsr;
err = ds->ops->port_hsr_join(ds, dp->index, hsr);
if (err)
dp->hsr_dev = NULL;
return err;
}
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
struct dsa_switch *ds = dp->ds;
int err;
dp->hsr_dev = NULL;
if (ds->ops->port_hsr_leave) {
err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
if (err)
dev_err(dp->ds->dev,
"port %d failed to leave HSR %s: %pe\n",
dp->index, hsr->name, ERR_PTR(err));
}
}
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
struct dsa_notifier_tag_8021q_vlan_info info = {
.dp = dp,
.vid = vid,
};
if (broadcast)
return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
struct dsa_notifier_tag_8021q_vlan_info info = {
.dp = dp,
.vid = vid,
};
int err;
if (broadcast)
err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
else
err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
if (err)
dev_err(dp->ds->dev,
"port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
dp->index, vid, ERR_PTR(err));
}
| linux-master | net/dsa/port.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DSA devlink handling
*/
#include <net/dsa.h>
#include <net/devlink.h>
#include "devlink.h"
static int dsa_devlink_info_get(struct devlink *dl,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (ds->ops->devlink_info_get)
return ds->ops->devlink_info_get(ds, req, extack);
return -EOPNOTSUPP;
}
static int dsa_devlink_sb_pool_get(struct devlink *dl,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
pool_info);
}
static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_pool_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
threshold_type, extack);
}
static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_port_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
pool_index, p_threshold);
}
static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 threshold,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_port_pool_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
pool_index, threshold, extack);
}
static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_tc_pool_bind_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
tc_index, pool_type,
p_pool_index, p_threshold);
}
static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_tc_pool_bind_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
tc_index, pool_type,
pool_index, threshold,
extack);
}
static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
unsigned int sb_index)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_occ_snapshot)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}
static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
unsigned int sb_index)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_occ_max_clear)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}
static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index,
u16 pool_index, u32 *p_cur,
u32 *p_max)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_occ_port_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
pool_index, p_cur, p_max);
}
static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
sb_index, tc_index,
pool_type, p_cur,
p_max);
}
static const struct devlink_ops dsa_devlink_ops = {
.info_get = dsa_devlink_info_get,
.sb_pool_get = dsa_devlink_sb_pool_get,
.sb_pool_set = dsa_devlink_sb_pool_set,
.sb_port_pool_get = dsa_devlink_sb_port_pool_get,
.sb_port_pool_set = dsa_devlink_sb_port_pool_set,
.sb_tc_pool_bind_get = dsa_devlink_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = dsa_devlink_sb_tc_pool_bind_set,
.sb_occ_snapshot = dsa_devlink_sb_occ_snapshot,
.sb_occ_max_clear = dsa_devlink_sb_occ_max_clear,
.sb_occ_port_pool_get = dsa_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = dsa_devlink_sb_occ_tc_port_bind_get,
};
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_param_get)
return -EOPNOTSUPP;
return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_param_set)
return -EOPNOTSUPP;
return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);
int dsa_devlink_params_register(struct dsa_switch *ds,
const struct devlink_param *params,
size_t params_count)
{
return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);
void dsa_devlink_params_unregister(struct dsa_switch *ds,
const struct devlink_param *params,
size_t params_count)
{
devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);
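/* Illustrative sketch only, loosely modelled on existing users of these
 * wrappers (the FOO_ and foo_ names are hypothetical): a driver declares its
 * devlink parameters with DSA_DEVLINK_PARAM_DRIVER() and registers them from
 * its ->setup() hook.
 */
#if 0
enum foo_devlink_param_id {
	FOO_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	FOO_DEVLINK_PARAM_ID_ATU_HASH,
};

static const struct devlink_param foo_devlink_params[] = {
	DSA_DEVLINK_PARAM_DRIVER(FOO_DEVLINK_PARAM_ID_ATU_HASH,
				 "ATU_hash", DEVLINK_PARAM_TYPE_U8,
				 BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
};

static int foo_setup_devlink_params(struct dsa_switch *ds)
{
	return dsa_devlink_params_register(ds, foo_devlink_params,
					   ARRAY_SIZE(foo_devlink_params));
}
#endif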
int dsa_devlink_resource_register(struct dsa_switch *ds,
const char *resource_name,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params)
{
return devlink_resource_register(ds->devlink, resource_name,
resource_size, resource_id,
parent_resource_id,
size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
devlink_resources_unregister(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);
void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv)
{
return devlink_resource_occ_get_register(ds->devlink, resource_id,
occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id)
{
devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
const struct devlink_region_ops *ops,
u32 region_max_snapshots, u64 region_size)
{
return devlink_region_create(ds->devlink, ops, region_max_snapshots,
region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_create);
struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
int port,
const struct devlink_port_region_ops *ops,
u32 region_max_snapshots, u64 region_size)
{
struct dsa_port *dp = dsa_to_port(ds, port);
return devlink_port_region_create(&dp->devlink_port, ops,
region_max_snapshots,
region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);
void dsa_devlink_region_destroy(struct devlink_region *region)
{
devlink_region_destroy(region);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);
int dsa_port_devlink_setup(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
struct dsa_switch_tree *dst = dp->ds->dst;
struct devlink_port_attrs attrs = {};
struct devlink *dl = dp->ds->devlink;
struct dsa_switch *ds = dp->ds;
const unsigned char *id;
unsigned char len;
int err;
memset(dlp, 0, sizeof(*dlp));
devlink_port_init(dl, dlp);
if (ds->ops->port_setup) {
err = ds->ops->port_setup(ds, dp->index);
if (err)
return err;
}
id = (const unsigned char *)&dst->index;
len = sizeof(dst->index);
attrs.phys.port_number = dp->index;
memcpy(attrs.switch_id.id, id, len);
attrs.switch_id.id_len = len;
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
break;
case DSA_PORT_TYPE_CPU:
attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
break;
case DSA_PORT_TYPE_DSA:
attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
break;
case DSA_PORT_TYPE_USER:
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
break;
}
devlink_port_attrs_set(dlp, &attrs);
err = devlink_port_register(dl, dlp, dp->index);
if (err) {
if (ds->ops->port_teardown)
ds->ops->port_teardown(ds, dp->index);
return err;
}
return 0;
}
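/* Note on the above: the devlink switch_id attribute is filled with the raw
* bytes of the switch tree index, so every port of every switch in the same
* tree exposes the same switch ID through devlink, which is what lets user
* space group ports that are switched together.
*/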
void dsa_port_devlink_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
struct dsa_switch *ds = dp->ds;
devlink_port_unregister(dlp);
if (ds->ops->port_teardown)
ds->ops->port_teardown(ds, dp->index);
devlink_port_fini(dlp);
}
void dsa_switch_devlink_register(struct dsa_switch *ds)
{
devlink_register(ds->devlink);
}
void dsa_switch_devlink_unregister(struct dsa_switch *ds)
{
devlink_unregister(ds->devlink);
}
int dsa_switch_devlink_alloc(struct dsa_switch *ds)
{
struct dsa_devlink_priv *dl_priv;
struct devlink *dl;
/* Add the switch to devlink before calling setup, so that setup can
* add dpipe tables
*/
dl = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
if (!dl)
return -ENOMEM;
ds->devlink = dl;
dl_priv = devlink_priv(ds->devlink);
dl_priv->ds = ds;
return 0;
}
void dsa_switch_devlink_free(struct dsa_switch *ds)
{
devlink_free(ds->devlink);
ds->devlink = NULL;
}
| linux-master | net/dsa/devlink.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <[email protected]>
*/
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/packing.h>
#include "tag.h"
#include "tag_8021q.h"
#define SJA1105_NAME "sja1105"
#define SJA1110_NAME "sja1110"
/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH BIT(15)
/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER BIT(12)
/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x) (((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x) ((x) & GENMASK(3, 0))
/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x) ((x) & GENMASK(11, 0))
#define SJA1110_RX_TRAILER_SWITCH_ID(x) (((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x) ((x) & GENMASK(3, 0))
/* Meta frame format (for 2-step TX timestamps) */
#define SJA1110_RX_HEADER_N_TS(x) (((x) & GENMASK(8, 4)) >> 4)
/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER BIT(11)
/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x) (((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x) ((x) & GENMASK(7, 0))
/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x) ((x) & GENMASK(10, 0))
#define SJA1110_TX_TRAILER_TSTAMP_ID(x) (((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x) (((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x) (((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x) (((x) << 1) & GENMASK(11, 1))
#define SJA1110_META_TSTAMP_SIZE 10
#define SJA1110_HEADER_LEN 4
#define SJA1110_RX_TRAILER_LEN 13
#define SJA1110_TX_TRAILER_LEN 4
#define SJA1110_MAX_PADDING_LEN 15
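/* Worked example for the RX header fields above (the value is hypothetical):
* a trap-to-host frame with rx_header == 0x2020 has HOST_TO_SWITCH (bit 15)
* clear, IS_METADATA (bit 14) clear, HOST_ONLY (bit 13) set and HAS_TRAILER
* (bit 12) clear, so the trap-to-host layout applies and
* SJA1110_RX_HEADER_SRC_PORT(0x2020) = 2, SJA1110_RX_HEADER_SWITCH_ID(0x2020) = 0.
*/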
struct sja1105_tagger_private {
struct sja1105_tagger_data data; /* Must be first */
/* Protects concurrent access to the meta state machine
* from taggers running on multiple ports on SMP systems
*/
spinlock_t meta_lock;
struct sk_buff *stampable_skb;
struct kthread_worker *xmit_worker;
};
static struct sja1105_tagger_private *
sja1105_tagger_private(struct dsa_switch *ds)
{
return ds->tagger_data;
}
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
return false;
if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
SJA1105_LINKLOCAL_FILTER_A)
return true;
if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
SJA1105_LINKLOCAL_FILTER_B)
return true;
return false;
}
struct sja1105_meta {
u64 tstamp;
u64 dmac_byte_4;
u64 dmac_byte_3;
u64 source_port;
u64 switch_id;
};
static void sja1105_meta_unpack(const struct sk_buff *skb,
struct sja1105_meta *meta)
{
u8 *buf = skb_mac_header(skb) + ETH_HLEN;
/* UM10944.pdf section 4.2.17 AVB Parameters:
* Structure of the meta-data follow-up frame.
* It is in network byte order, so there are no quirks
* while unpacking the meta frame.
*
* Also SJA1105 E/T only populates bits 23:0 of the timestamp
* whereas P/Q/R/S does 32 bits. Since the structure is the
* same and the E/T puts zeroes in the high-order byte, use
* a unified unpacking command for both device series.
*/
packing(buf, &meta->tstamp, 31, 0, 4, UNPACK, 0);
packing(buf + 4, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
packing(buf + 5, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 smac = ether_addr_to_u64(hdr->h_source);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
if (smac != SJA1105_META_SMAC)
return false;
if (dmac != SJA1105_META_DMAC)
return false;
if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
return false;
return true;
}
/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
struct sk_buff *skb)
{
struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
void (*xmit_work_fn)(struct kthread_work *work);
struct sja1105_deferred_xmit_work *xmit_work;
struct kthread_worker *xmit_worker;
xmit_work_fn = tagger_data->xmit_work_fn;
xmit_worker = priv->xmit_worker;
if (!xmit_work_fn || !xmit_worker)
return NULL;
xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
if (!xmit_work)
return NULL;
kthread_init_work(&xmit_work->work, xmit_work_fn);
/* Increase refcount so the kfree_skb in dsa_slave_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
xmit_work->skb = skb_get(skb);
kthread_queue_work(xmit_worker, &xmit_work->work);
return NULL;
}
/* Send VLAN tags with a TPID that blends in with whatever VLAN protocol a
* bridge spanning ports of this switch might have.
*/
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct dsa_port *other_dp;
u16 proto;
/* Since VLAN awareness is global, if this port is VLAN-unaware,
* all ports are. Use the VLAN-unaware TPID used for tag_8021q.
*/
if (!dsa_port_is_vlan_filtering(dp))
return ETH_P_SJA1105;
/* Port is VLAN-aware, so there is a bridge somewhere (a single one,
* we're sure about that). It may not be on this port though, so we
* need to find it.
*/
dsa_switch_for_each_port(other_dp, ds) {
struct net_device *br = dsa_port_bridge_dev_get(other_dp);
if (!br)
continue;
/* An error is returned only if CONFIG_BRIDGE_VLAN_FILTERING is disabled,
* which seems pointless to handle, as our port cannot become
* VLAN-aware in that case.
*/
br_vlan_get_proto(br, &proto);
return proto;
}
WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");
return ETH_P_SJA1105;
}
static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
unsigned int bridge_num = dsa_port_bridge_num_get(dp);
struct net_device *br = dsa_port_bridge_dev_get(dp);
u16 tx_vid;
/* If the port is under a VLAN-aware bridge, just slide the
* VLAN-tagged packet into the FDB and hope for the best.
* This works because we support a single VLAN-aware bridge
* across the entire dst, and its VLANs cannot be shared with
* any standalone port.
*/
if (br_vlan_enabled(br))
return skb;
/* If the port is under a VLAN-unaware bridge, use an imprecise
* TX VLAN that targets the bridge's entire broadcast domain,
* instead of just the specific port.
*/
tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);
return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}
/* Transform untagged control packets into pvid-tagged control packets so that
* all packets sent by this tagger are VLAN-tagged and we can configure the
* switch to drop untagged packets coming from the DSA master.
*/
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
struct sk_buff *skb, u8 pcp)
{
__be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
struct vlan_ethhdr *hdr;
/* If VLAN tag is in hwaccel area, move it to the payload
* to deal with both cases uniformly and to ensure that
* the VLANs are added in the right order.
*/
if (unlikely(skb_vlan_tag_present(skb))) {
skb = __vlan_hwaccel_push_inside(skb);
if (!skb)
return NULL;
}
hdr = skb_vlan_eth_hdr(skb);
/* If skb is already VLAN-tagged, leave that VLAN ID in place */
if (hdr->h_vlan_proto == xmit_tpid)
return skb;
return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
SJA1105_DEFAULT_VLAN);
}
static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
if (skb->offload_fwd_mark)
return sja1105_imprecise_xmit(skb, netdev);
/* Transmitting management traffic does not rely upon switch tagging,
* but instead SPI-installed management routes. Part 2 of this
* is the .port_deferred_xmit driver callback.
*/
if (unlikely(sja1105_is_link_local(skb))) {
skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
if (!skb)
return NULL;
return sja1105_defer_xmit(dp, skb);
}
return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}
static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct dsa_port *dp = dsa_slave_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
__be32 *tx_trailer;
__be16 *tx_header;
int trailer_pos;
if (skb->offload_fwd_mark)
return sja1105_imprecise_xmit(skb, netdev);
/* Transmitting control packets is done using in-band control
* extensions, while data packets are transmitted using
* tag_8021q TX VLANs.
*/
if (likely(!sja1105_is_link_local(skb)))
return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
if (!skb)
return NULL;
skb_push(skb, SJA1110_HEADER_LEN);
dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);
trailer_pos = skb->len;
tx_header = dsa_etype_header_pos_tx(skb);
tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);
tx_header[0] = htons(ETH_P_SJA1110);
tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
SJA1110_TX_HEADER_HAS_TRAILER |
SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
if (clone) {
u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;
tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
}
return skb;
}
static void sja1105_transfer_meta(struct sk_buff *skb,
const struct sja1105_meta *meta)
{
struct ethhdr *hdr = eth_hdr(skb);
hdr->h_dest[3] = meta->dmac_byte_3;
hdr->h_dest[4] = meta->dmac_byte_4;
SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}
/* This is a simple state machine which follows the hardware mechanism of
* generating RX timestamps:
*
* After each timestampable skb (all traffic for which send_meta1 and
* send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
* containing a partial timestamp is immediately generated by the switch and
* sent as a follow-up to the link-local frame on the CPU port.
*
* The meta frames have no unique identifier (such as sequence number) by which
* one may pair them to the correct timestampable frame.
* Instead, the switch has internal logic that ensures no frames are sent on
* the CPU port between a link-local timestampable frame and its corresponding
* meta follow-up. It also ensures strict ordering between ports (lower ports
* have higher priority towards the CPU port). For this reason, a per-port
* data structure is not needed/desirable.
*
* This function pairs the link-local frame with its partial timestamp from the
* meta follow-up frame. The full timestamp will be reconstructed later in a
* work queue.
*/
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
struct sja1105_meta *meta,
bool is_link_local,
bool is_meta)
{
/* Step 1: A timestampable frame was received.
* Buffer it until we get its meta frame.
*/
if (is_link_local) {
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
struct sja1105_tagger_private *priv;
struct dsa_switch *ds = dp->ds;
priv = sja1105_tagger_private(ds);
spin_lock(&priv->meta_lock);
/* Was this a link-local frame instead of the meta
* that we were expecting?
*/
if (priv->stampable_skb) {
dev_err_ratelimited(ds->dev,
"Expected meta frame, is %12llx "
"in the DSA master multicast filter?\n",
SJA1105_META_DMAC);
kfree_skb(priv->stampable_skb);
}
/* Hold a reference to avoid dsa_switch_rcv
* from freeing the skb.
*/
priv->stampable_skb = skb_get(skb);
spin_unlock(&priv->meta_lock);
/* Tell DSA we got nothing */
return NULL;
/* Step 2: The meta frame arrived.
* Time to take the stampable skb out of the closet, annotate it
* with the partial timestamp, and pretend that we received it
* just now (basically masquerade the buffered frame as the meta
* frame, which serves no further purpose).
*/
} else if (is_meta) {
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
struct sja1105_tagger_private *priv;
struct dsa_switch *ds = dp->ds;
struct sk_buff *stampable_skb;
priv = sja1105_tagger_private(ds);
spin_lock(&priv->meta_lock);
stampable_skb = priv->stampable_skb;
priv->stampable_skb = NULL;
/* Was this a meta frame instead of the link-local
* that we were expecting?
*/
if (!stampable_skb) {
dev_err_ratelimited(ds->dev,
"Unexpected meta frame\n");
spin_unlock(&priv->meta_lock);
return NULL;
}
if (stampable_skb->dev != skb->dev) {
dev_err_ratelimited(ds->dev,
"Meta frame on wrong port\n");
spin_unlock(&priv->meta_lock);
return NULL;
}
/* Free the meta frame and give DSA the buffered stampable_skb
* for further processing up the network stack.
*/
kfree_skb(skb);
skb = stampable_skb;
sja1105_transfer_meta(skb, meta);
spin_unlock(&priv->meta_lock);
}
return skb;
}
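/* Illustrative sequence for the state machine above: a trapped link-local
* frame arrives first and is parked in priv->stampable_skb (step 1); the very
* next frame delivered on the CPU port must be its meta follow-up, from which
* step 2 copies the partial timestamp and the two DMAC bytes back into the
* parked skb, frees the meta frame and returns the original (now annotated)
* frame to DSA.
*/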
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
u16 tpid = ntohs(eth_hdr(skb)->h_proto);
return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
skb_vlan_tag_present(skb);
}
static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}
/* If the VLAN in the packet is a tag_8021q one, set @source_port and
* @switch_id and strip the header. Otherwise set @vid and keep it in the
* packet.
*/
static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
int *switch_id, int *vbid, u16 *vid)
{
struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
u16 vlan_tci;
if (skb_vlan_tag_present(skb))
vlan_tci = skb_vlan_tag_get(skb);
else
vlan_tci = ntohs(hdr->h_vlan_TCI);
if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
return dsa_8021q_rcv(skb, source_port, switch_id, vbid);
/* Try our best with imprecise RX */
*vid = vlan_tci & VLAN_VID_MASK;
}
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
int source_port = -1, switch_id = -1, vbid = -1;
struct sja1105_meta meta = {0};
struct ethhdr *hdr;
bool is_link_local;
bool is_meta;
u16 vid;
hdr = eth_hdr(skb);
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
if (is_link_local) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
* the incl_srcpt options.
*/
source_port = hdr->h_dest[3];
switch_id = hdr->h_dest[4];
} else if (is_meta) {
sja1105_meta_unpack(skb, &meta);
source_port = meta.source_port;
switch_id = meta.switch_id;
}
/* Normal data plane traffic and link-local frames are tagged with
* a tag_8021q VLAN which we have to strip
*/
if (sja1105_skb_has_tag_8021q(skb)) {
int tmp_source_port = -1, tmp_switch_id = -1;
sja1105_vlan_rcv(skb, &tmp_source_port, &tmp_switch_id, &vbid,
&vid);
/* Preserve the source information from the INCL_SRCPT option,
* if available. This allows us to not overwrite a valid source
* port and switch ID with zeroes when receiving link-local
* frames from a VLAN-unaware bridged port (non-zero vbid) or a
* VLAN-aware bridged port (non-zero vid). Furthermore, the
* tag_8021q source port information is only trustworthy when the
* vbid is 0 (precise port). Otherwise, tmp_source_port and
* tmp_switch_id will be zeroes.
*/
if (vbid == 0 && source_port == -1)
source_port = tmp_source_port;
if (vbid == 0 && switch_id == -1)
switch_id = tmp_switch_id;
} else if (source_port == -1 && switch_id == -1) {
/* Packets with no source information have no chance of
* getting accepted, drop them straight away.
*/
return NULL;
}
if (source_port != -1 && switch_id != -1)
skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
else if (vbid >= 1)
skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
else
skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
}
if (!is_link_local)
dsa_default_offload_fwd_mark(skb);
return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
is_meta);
}
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
struct sja1105_tagger_data *tagger_data;
struct net_device *master = skb->dev;
struct dsa_port *cpu_dp;
struct dsa_switch *ds;
int i;
cpu_dp = master->dsa_ptr;
ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
if (!ds) {
net_err_ratelimited("%s: cannot find switch id %d\n",
master->name, switch_id);
return NULL;
}
tagger_data = sja1105_tagger_data(ds);
if (!tagger_data->meta_tstamp_handler)
return NULL;
for (i = 0; i <= n_ts; i++) {
u8 ts_id, source_port, dir;
u64 tstamp;
ts_id = buf[0];
source_port = (buf[1] & GENMASK(7, 4)) >> 4;
dir = (buf[1] & BIT(3)) >> 3;
tstamp = be64_to_cpu(*(__be64 *)(buf + 2));
tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
tstamp);
buf += SJA1110_META_TSTAMP_SIZE;
}
/* Discard the meta frame, we've consumed the timestamps it contained */
return NULL;
}
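/* Each timestamp record walked by the loop above occupies
* SJA1110_META_TSTAMP_SIZE (10) bytes: byte 0 is the timestamp ID, byte 1
* carries the source port in bits 7:4 and the direction in bit 3, and bytes
* 2..9 hold the 64-bit big-endian timestamp.
*/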
static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
int *source_port,
int *switch_id,
bool *host_only)
{
u16 rx_header;
if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
return NULL;
/* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
* what we need because the caller has checked the EtherType (which is
* located 2 bytes back) and we just need a pointer to the header that
* comes afterwards.
*/
rx_header = ntohs(*(__be16 *)skb->data);
if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
*host_only = true;
if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
return sja1110_rcv_meta(skb, rx_header);
/* Timestamp frame, we have a trailer */
if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
u8 last_byte = rx_trailer[12];
/* The timestamp is unaligned, so we need to use packing()
* to get it
*/
packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);
*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);
/* skb->len counts from skb->data, while start_of_padding
* counts from the destination MAC address. Right now skb->data
* is still as set by the DSA master, so to trim away the
* padding and trailer we need to account for the fact that
* skb->data points to skb_mac_header(skb) + ETH_HLEN.
*/
if (pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN))
return NULL;
/* Trap-to-host frame, no timestamp trailer */
} else {
*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
}
/* Advance skb->data past the DSA header */
skb_pull_rcsum(skb, SJA1110_HEADER_LEN);
dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);
/* With skb->data in its final place, update the MAC header
* so that eth_hdr() continues to work properly.
*/
skb_set_mac_header(skb, -ETH_HLEN);
return skb;
}
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
int source_port = -1, switch_id = -1, vbid = -1;
bool host_only = false;
u16 vid = 0;
if (sja1110_skb_has_inband_control_extension(skb)) {
skb = sja1110_rcv_inband_control_extension(skb, &source_port,
&switch_id,
&host_only);
if (!skb)
return NULL;
}
/* Packets with in-band control extensions might still have RX VLANs */
if (likely(sja1105_skb_has_tag_8021q(skb)))
sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
if (vbid >= 1)
skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
else if (source_port == -1 || switch_id == -1)
skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
else
skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
}
if (!host_only)
dsa_default_offload_fwd_mark(skb);
return skb;
}
static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
int *offset)
{
/* No tag added for management frames, all ok */
if (unlikely(sja1105_is_link_local(skb)))
return;
dsa_tag_generic_flow_dissect(skb, proto, offset);
}
static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
int *offset)
{
/* Management frames have 2 DSA tags on RX, so the needed_headroom we
* declared is fine for the generic dissector adjustment procedure.
*/
if (unlikely(sja1105_is_link_local(skb)))
return dsa_tag_generic_flow_dissect(skb, proto, offset);
/* For the rest, there is a single DSA tag, the tag_8021q one */
*offset = VLAN_HLEN;
*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}
static void sja1105_disconnect(struct dsa_switch *ds)
{
struct sja1105_tagger_private *priv = ds->tagger_data;
kthread_destroy_worker(priv->xmit_worker);
kfree(priv);
ds->tagger_data = NULL;
}
static int sja1105_connect(struct dsa_switch *ds)
{
struct sja1105_tagger_private *priv;
struct kthread_worker *xmit_worker;
int err;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->meta_lock);
xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
err = PTR_ERR(xmit_worker);
kfree(priv);
return err;
}
priv->xmit_worker = xmit_worker;
ds->tagger_data = priv;
return 0;
}
static const struct dsa_device_ops sja1105_netdev_ops = {
.name = SJA1105_NAME,
.proto = DSA_TAG_PROTO_SJA1105,
.xmit = sja1105_xmit,
.rcv = sja1105_rcv,
.connect = sja1105_connect,
.disconnect = sja1105_disconnect,
.needed_headroom = VLAN_HLEN,
.flow_dissect = sja1105_flow_dissect,
.promisc_on_master = true,
};
DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105, SJA1105_NAME);
static const struct dsa_device_ops sja1110_netdev_ops = {
.name = SJA1110_NAME,
.proto = DSA_TAG_PROTO_SJA1110,
.xmit = sja1110_xmit,
.rcv = sja1110_rcv,
.connect = sja1105_connect,
.disconnect = sja1105_disconnect,
.flow_dissect = sja1110_flow_dissect,
.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};
DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110, SJA1110_NAME);
static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};
module_dsa_tag_drivers(sja1105_tag_driver_array);
MODULE_LICENSE("GPL v2");
| linux-master | net/dsa/tag_sja1105.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* net/dsa/tag_hellcreek.c - Hirschmann Hellcreek switch tag format handling
*
* Copyright (C) 2019,2020 Linutronix GmbH
* Author Kurt Kanzenbach <[email protected]>
*
* Based on tag_ksz.c.
*/
#include <linux/skbuff.h>
#include <net/dsa.h>
#include "tag.h"
#define HELLCREEK_NAME "hellcreek"
#define HELLCREEK_TAG_LEN 1
static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *tag;
/* Calculate checksums (if required) before adding the trailer tag to
* avoid including it in calculations. That would lead to wrong
* checksums after the switch strips the tag.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(skb))
return NULL;
/* Tag encoding */
tag = skb_put(skb, HELLCREEK_TAG_LEN);
*tag = BIT(dp->index);
return skb;
}
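/* Worked example for the 1-byte trailer: a frame transmitted on port 2 gets
* the mask BIT(2) == 0x04 appended above, while on receive hellcreek_rcv()
* below reads the source port as a plain number from the two low bits of the
* trailer byte.
*/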
static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
struct net_device *dev)
{
/* Tag decoding */
u8 *tag = skb_tail_pointer(skb) - HELLCREEK_TAG_LEN;
unsigned int port = tag[0] & 0x03;
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev) {
netdev_warn_once(dev, "Failed to get source port: %d\n", port);
return NULL;
}
if (pskb_trim_rcsum(skb, skb->len - HELLCREEK_TAG_LEN))
return NULL;
dsa_default_offload_fwd_mark(skb);
return skb;
}
static const struct dsa_device_ops hellcreek_netdev_ops = {
.name = HELLCREEK_NAME,
.proto = DSA_TAG_PROTO_HELLCREEK,
.xmit = hellcreek_xmit,
.rcv = hellcreek_rcv,
.needed_tailroom = HELLCREEK_TAG_LEN,
};
MODULE_LICENSE("Dual MIT/GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_HELLCREEK, HELLCREEK_NAME);
module_dsa_tag_driver(hellcreek_netdev_ops);
| linux-master | net/dsa/tag_hellcreek.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2022 NXP
*/
#include <linux/netdevice.h>
#include <net/rtnetlink.h>
#include "netlink.h"
#include "slave.h"
static const struct nla_policy dsa_policy[IFLA_DSA_MAX + 1] = {
[IFLA_DSA_MASTER] = { .type = NLA_U32 },
};
static int dsa_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
int err;
if (!data)
return 0;
if (data[IFLA_DSA_MASTER]) {
u32 ifindex = nla_get_u32(data[IFLA_DSA_MASTER]);
struct net_device *master;
master = __dev_get_by_index(dev_net(dev), ifindex);
if (!master)
return -EINVAL;
err = dsa_slave_change_master(dev, master, extack);
if (err)
return err;
}
return 0;
}
static size_t dsa_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(u32)) + /* IFLA_DSA_MASTER */
0;
}
static int dsa_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct net_device *master = dsa_slave_to_master(dev);
if (nla_put_u32(skb, IFLA_DSA_MASTER, master->ifindex))
return -EMSGSIZE;
return 0;
}
struct rtnl_link_ops dsa_link_ops __read_mostly = {
.kind = "dsa",
.priv_size = sizeof(struct dsa_port),
.maxtype = IFLA_DSA_MAX,
.policy = dsa_policy,
.changelink = dsa_changelink,
.get_size = dsa_get_size,
.fill_info = dsa_fill_info,
.netns_refund = true,
};
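/* For reference: the request handled by dsa_changelink() above is a regular
* RTM_NEWLINK on the user port, with IFLA_LINKINFO containing IFLA_INFO_KIND
* = "dsa" and an IFLA_INFO_DATA nest holding IFLA_DSA_MASTER as the u32
* ifindex of the new DSA master. Recent iproute2 exposes this as something
* like "ip link set dev swp0 type dsa master eth1", though the exact keyword
* is not guaranteed here and may differ between iproute2 versions.
*/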
| linux-master | net/dsa/netlink.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Pengutronix, Juergen Borleis <[email protected]>
*/
#include <linux/dsa/lan9303.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "tag.h"
/* To define the outgoing port and to discover the incoming port a regular
* VLAN tag is used by the LAN9303. But its VID meaning is 'special':
*
*        Dest MAC       Src MAC       TAG    Type
* ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 | 1 2 |...
*                                |<------->|
* TAG:
*    |<------------->|
*    |  1  2 | 3  4  |
*      TPID    VID
*     0x8100
*
* VID bit 3 indicates a request for an ALR lookup.
*
* If VID bit 3 is zero, then bits 0 and 1 specify the destination port
* (0, 1, 2) or broadcast (3) or the source port (1, 2).
*
* VID bit 4 is used to specify if the STP port state should be overridden.
* Required when no forwarding between the external ports should happen.
*/
#define LAN9303_NAME "lan9303"
#define LAN9303_TAG_LEN 4
# define LAN9303_TAG_TX_USE_ALR BIT(3)
# define LAN9303_TAG_TX_STP_OVERRIDE BIT(4)
# define LAN9303_TAG_RX_IGMP BIT(3)
# define LAN9303_TAG_RX_STP BIT(4)
# define LAN9303_TAG_RX_TRAPPED_TO_CPU (LAN9303_TAG_RX_IGMP | \
LAN9303_TAG_RX_STP)
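/* Worked example of the TX tag built in lan9303_xmit() below (the port
* number is hypothetical): sending directly to external port 1 with the STP
* override yields tag = 1 | LAN9303_TAG_TX_STP_OVERRIDE = 0x11, i.e. the
* bytes 0x81 0x00 0x00 0x11 between the source MAC and the EtherType, while
* an ALR-lookup transmission uses tag = LAN9303_TAG_TX_USE_ALR = 0x08.
*/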
/* Decide whether to transmit using ALR lookup, or transmit directly to
* port using tag. ALR learning is performed only when using ALR lookup.
* If the two external ports are bridged and the frame is unicast,
* then use ALR lookup to allow ALR learning on CPU port.
* Otherwise transmit directly to port with STP state override.
* See also: lan9303_separate_ports() and lan9303.pdf 6.4.10.1
*/
static int lan9303_xmit_use_arl(struct dsa_port *dp, u8 *dest_addr)
{
struct lan9303 *chip = dp->ds->priv;
return chip->is_bridged && !is_multicast_ether_addr(dest_addr);
}
static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
__be16 *lan9303_tag;
u16 tag;
/* provide 'LAN9303_TAG_LEN' bytes additional space */
skb_push(skb, LAN9303_TAG_LEN);
/* make room between MACs and Ether-Type */
dsa_alloc_etype_header(skb, LAN9303_TAG_LEN);
lan9303_tag = dsa_etype_header_pos_tx(skb);
tag = lan9303_xmit_use_arl(dp, skb->data) ?
LAN9303_TAG_TX_USE_ALR :
dp->index | LAN9303_TAG_TX_STP_OVERRIDE;
lan9303_tag[0] = htons(ETH_P_8021Q);
lan9303_tag[1] = htons(tag);
return skb;
}
static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
{
u16 lan9303_tag1;
unsigned int source_port;
if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) {
dev_warn_ratelimited(&dev->dev,
"Dropping packet, cannot pull\n");
return NULL;
}
if (skb_vlan_tag_present(skb)) {
lan9303_tag1 = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &lan9303_tag1);
skb_pull_rcsum(skb, ETH_HLEN);
}
source_port = lan9303_tag1 & 0x3;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
if (!skb->dev) {
dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n");
return NULL;
}
if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU))
dsa_default_offload_fwd_mark(skb);
return skb;
}
static const struct dsa_device_ops lan9303_netdev_ops = {
.name = LAN9303_NAME,
.proto = DSA_TAG_PROTO_LAN9303,
.xmit = lan9303_xmit,
.rcv = lan9303_rcv,
.needed_headroom = LAN9303_TAG_LEN,
};
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_LAN9303, LAN9303_NAME);
module_dsa_tag_driver(lan9303_netdev_ops);
| linux-master | net/dsa/tag_lan9303.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright Samuel Mendoza-Jonas, IBM Corporation 2018.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <net/genetlink.h>
#include <net/ncsi.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <uapi/linux/ncsi.h>
#include "internal.h"
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"
static struct genl_family ncsi_genl_family;
static const struct nla_policy ncsi_genl_policy[NCSI_ATTR_MAX + 1] = {
[NCSI_ATTR_IFINDEX] = { .type = NLA_U32 },
[NCSI_ATTR_PACKAGE_LIST] = { .type = NLA_NESTED },
[NCSI_ATTR_PACKAGE_ID] = { .type = NLA_U32 },
[NCSI_ATTR_CHANNEL_ID] = { .type = NLA_U32 },
[NCSI_ATTR_DATA] = { .type = NLA_BINARY, .len = 2048 },
[NCSI_ATTR_MULTI_FLAG] = { .type = NLA_FLAG },
[NCSI_ATTR_PACKAGE_MASK] = { .type = NLA_U32 },
[NCSI_ATTR_CHANNEL_MASK] = { .type = NLA_U32 },
};
static struct ncsi_dev_priv *ndp_from_ifindex(struct net *net, u32 ifindex)
{
struct ncsi_dev_priv *ndp;
struct net_device *dev;
struct ncsi_dev *nd;
if (!net)
return NULL;
dev = dev_get_by_index(net, ifindex);
if (!dev) {
pr_err("NCSI netlink: No device for ifindex %u\n", ifindex);
return NULL;
}
nd = ncsi_find_dev(dev);
ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
dev_put(dev);
return ndp;
}
static int ncsi_write_channel_info(struct sk_buff *skb,
struct ncsi_dev_priv *ndp,
struct ncsi_channel *nc)
{
struct ncsi_channel_vlan_filter *ncf;
struct ncsi_channel_mode *m;
struct nlattr *vid_nest;
int i;
nla_put_u32(skb, NCSI_CHANNEL_ATTR_ID, nc->id);
m = &nc->modes[NCSI_MODE_LINK];
nla_put_u32(skb, NCSI_CHANNEL_ATTR_LINK_STATE, m->data[2]);
if (nc->state == NCSI_CHANNEL_ACTIVE)
nla_put_flag(skb, NCSI_CHANNEL_ATTR_ACTIVE);
if (nc == nc->package->preferred_channel)
nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);
nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2);
nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name);
vid_nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR_VLAN_LIST);
if (!vid_nest)
return -ENOMEM;
ncf = &nc->vlan_filter;
i = -1;
while ((i = find_next_bit((void *)&ncf->bitmap, ncf->n_vids,
i + 1)) < ncf->n_vids) {
if (ncf->vids[i])
nla_put_u16(skb, NCSI_CHANNEL_ATTR_VLAN_ID,
ncf->vids[i]);
}
nla_nest_end(skb, vid_nest);
return 0;
}
static int ncsi_write_package_info(struct sk_buff *skb,
struct ncsi_dev_priv *ndp, unsigned int id)
{
struct nlattr *pnest, *cnest, *nest;
struct ncsi_package *np;
struct ncsi_channel *nc;
bool found;
int rc;
if (id > ndp->package_num - 1) {
netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id);
return -ENODEV;
}
found = false;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
if (np->id != id)
continue;
pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
if (!pnest)
return -ENOMEM;
rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
if (rc) {
nla_nest_cancel(skb, pnest);
return rc;
}
if ((0x1 << np->id) == ndp->package_whitelist)
nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
if (!cnest) {
nla_nest_cancel(skb, pnest);
return -ENOMEM;
}
NCSI_FOR_EACH_CHANNEL(np, nc) {
nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR);
if (!nest) {
nla_nest_cancel(skb, cnest);
nla_nest_cancel(skb, pnest);
return -ENOMEM;
}
rc = ncsi_write_channel_info(skb, ndp, nc);
if (rc) {
nla_nest_cancel(skb, nest);
nla_nest_cancel(skb, cnest);
nla_nest_cancel(skb, pnest);
return rc;
}
nla_nest_end(skb, nest);
}
nla_nest_end(skb, cnest);
nla_nest_end(skb, pnest);
found = true;
}
if (!found)
return -ENODEV;
return 0;
}
static int ncsi_pkg_info_nl(struct sk_buff *msg, struct genl_info *info)
{
struct ncsi_dev_priv *ndp;
unsigned int package_id;
struct sk_buff *skb;
struct nlattr *attr;
void *hdr;
int rc;
if (!info || !info->attrs)
return -EINVAL;
if (!info->attrs[NCSI_ATTR_IFINDEX])
return -EINVAL;
if (!info->attrs[NCSI_ATTR_PACKAGE_ID])
return -EINVAL;
ndp = ndp_from_ifindex(genl_info_net(info),
nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
if (!ndp)
return -ENODEV;
skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
&ncsi_genl_family, 0, NCSI_CMD_PKG_INFO);
if (!hdr) {
kfree_skb(skb);
return -EMSGSIZE;
}
package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
attr = nla_nest_start_noflag(skb, NCSI_ATTR_PACKAGE_LIST);
if (!attr) {
kfree_skb(skb);
return -EMSGSIZE;
}
rc = ncsi_write_package_info(skb, ndp, package_id);
if (rc) {
nla_nest_cancel(skb, attr);
goto err;
}
nla_nest_end(skb, attr);
genlmsg_end(skb, hdr);
return genlmsg_reply(skb, info);
err:
kfree_skb(skb);
return rc;
}
static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct nlattr *attrs[NCSI_ATTR_MAX + 1];
struct ncsi_package *np, *package;
struct ncsi_dev_priv *ndp;
unsigned int package_id;
struct nlattr *attr;
void *hdr;
int rc;
rc = genlmsg_parse_deprecated(cb->nlh, &ncsi_genl_family, attrs, NCSI_ATTR_MAX,
ncsi_genl_policy, NULL);
if (rc)
return rc;
if (!attrs[NCSI_ATTR_IFINDEX])
return -EINVAL;
ndp = ndp_from_ifindex(get_net(sock_net(skb->sk)),
nla_get_u32(attrs[NCSI_ATTR_IFINDEX]));
if (!ndp)
return -ENODEV;
package_id = cb->args[0];
package = NULL;
NCSI_FOR_EACH_PACKAGE(ndp, np)
if (np->id == package_id)
package = np;
if (!package)
return 0; /* done */
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO);
if (!hdr) {
rc = -EMSGSIZE;
goto err;
}
attr = nla_nest_start_noflag(skb, NCSI_ATTR_PACKAGE_LIST);
if (!attr) {
rc = -EMSGSIZE;
goto err;
}
rc = ncsi_write_package_info(skb, ndp, package->id);
if (rc) {
nla_nest_cancel(skb, attr);
goto err;
}
nla_nest_end(skb, attr);
genlmsg_end(skb, hdr);
cb->args[0] = package_id + 1;
return skb->len;
err:
genlmsg_cancel(skb, hdr);
return rc;
}
static int ncsi_set_interface_nl(struct sk_buff *msg, struct genl_info *info)
{
struct ncsi_package *np, *package;
struct ncsi_channel *nc, *channel;
u32 package_id, channel_id;
struct ncsi_dev_priv *ndp;
unsigned long flags;
if (!info || !info->attrs)
return -EINVAL;
if (!info->attrs[NCSI_ATTR_IFINDEX])
return -EINVAL;
if (!info->attrs[NCSI_ATTR_PACKAGE_ID])
return -EINVAL;
ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
if (!ndp)
return -ENODEV;
package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
package = NULL;
NCSI_FOR_EACH_PACKAGE(ndp, np)
if (np->id == package_id)
package = np;
if (!package) {
/* The user has set a package that does not exist */
return -ERANGE;
}
channel = NULL;
if (info->attrs[NCSI_ATTR_CHANNEL_ID]) {
channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
NCSI_FOR_EACH_CHANNEL(package, nc)
if (nc->id == channel_id) {
channel = nc;
break;
}
if (!channel) {
netdev_info(ndp->ndev.dev,
"NCSI: Channel %u does not exist!\n",
channel_id);
return -ERANGE;
}
}
spin_lock_irqsave(&ndp->lock, flags);
ndp->package_whitelist = 0x1 << package->id;
ndp->multi_package = false;
spin_unlock_irqrestore(&ndp->lock, flags);
spin_lock_irqsave(&package->lock, flags);
package->multi_channel = false;
if (channel) {
package->channel_whitelist = 0x1 << channel->id;
package->preferred_channel = channel;
} else {
/* Allow any channel */
package->channel_whitelist = UINT_MAX;
package->preferred_channel = NULL;
}
spin_unlock_irqrestore(&package->lock, flags);
if (channel)
netdev_info(ndp->ndev.dev,
"Set package 0x%x, channel 0x%x as preferred\n",
package_id, channel_id);
else
netdev_info(ndp->ndev.dev, "Set package 0x%x as preferred\n",
package_id);
/* Update channel configuration */
if (!(ndp->flags & NCSI_DEV_RESET))
ncsi_reset_dev(&ndp->ndev);
return 0;
}
static int ncsi_clear_interface_nl(struct sk_buff *msg, struct genl_info *info)
{
struct ncsi_dev_priv *ndp;
struct ncsi_package *np;
unsigned long flags;
if (!info || !info->attrs)
return -EINVAL;
if (!info->attrs[NCSI_ATTR_IFINDEX])
return -EINVAL;
ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
if (!ndp)
return -ENODEV;
/* Reset any whitelists and disable multi mode */
spin_lock_irqsave(&ndp->lock, flags);
ndp->package_whitelist = UINT_MAX;
ndp->multi_package = false;
spin_unlock_irqrestore(&ndp->lock, flags);
NCSI_FOR_EACH_PACKAGE(ndp, np) {
spin_lock_irqsave(&np->lock, flags);
np->multi_channel = false;
np->channel_whitelist = UINT_MAX;
np->preferred_channel = NULL;
spin_unlock_irqrestore(&np->lock, flags);
}
netdev_info(ndp->ndev.dev, "NCSI: Cleared preferred package/channel\n");
/* Update channel configuration */
if (!(ndp->flags & NCSI_DEV_RESET))
ncsi_reset_dev(&ndp->ndev);
return 0;
}
static int ncsi_send_cmd_nl(struct sk_buff *msg, struct genl_info *info)
{
struct ncsi_dev_priv *ndp;
struct ncsi_pkt_hdr *hdr;
struct ncsi_cmd_arg nca;
unsigned char *data;
u32 package_id;
u32 channel_id;
int len, ret;
if (!info || !info->attrs) {
ret = -EINVAL;
goto out;
}
if (!info->attrs[NCSI_ATTR_IFINDEX]) {
ret = -EINVAL;
goto out;
}
if (!info->attrs[NCSI_ATTR_PACKAGE_ID]) {
ret = -EINVAL;
goto out;
}
if (!info->attrs[NCSI_ATTR_CHANNEL_ID]) {
ret = -EINVAL;
goto out;
}
if (!info->attrs[NCSI_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
if (!ndp) {
ret = -ENODEV;
goto out;
}
package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
if (package_id >= NCSI_MAX_PACKAGE || channel_id >= NCSI_MAX_CHANNEL) {
ret = -ERANGE;
goto out_netlink;
}
len = nla_len(info->attrs[NCSI_ATTR_DATA]);
if (len < sizeof(struct ncsi_pkt_hdr)) {
netdev_info(ndp->ndev.dev, "NCSI: no command to send %u\n",
package_id);
ret = -EINVAL;
goto out_netlink;
} else {
data = (unsigned char *)nla_data(info->attrs[NCSI_ATTR_DATA]);
}
hdr = (struct ncsi_pkt_hdr *)data;
nca.ndp = ndp;
nca.package = (unsigned char)package_id;
nca.channel = (unsigned char)channel_id;
nca.type = hdr->type;
nca.req_flags = NCSI_REQ_FLAG_NETLINK_DRIVEN;
nca.info = info;
nca.payload = ntohs(hdr->length);
nca.data = data + sizeof(*hdr);
ret = ncsi_xmit_cmd(&nca);
out_netlink:
if (ret != 0) {
netdev_err(ndp->ndev.dev,
"NCSI: Error %d sending command\n",
ret);
ncsi_send_netlink_err(ndp->ndev.dev,
info->snd_seq,
info->snd_portid,
info->nlhdr,
ret);
}
out:
return ret;
}
int ncsi_send_netlink_rsp(struct ncsi_request *nr,
struct ncsi_package *np,
struct ncsi_channel *nc)
{
struct sk_buff *skb;
struct net *net;
void *hdr;
int rc;
net = dev_net(nr->rsp->dev);
skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
hdr = genlmsg_put(skb, nr->snd_portid, nr->snd_seq,
&ncsi_genl_family, 0, NCSI_CMD_SEND_CMD);
if (!hdr) {
kfree_skb(skb);
return -EMSGSIZE;
}
nla_put_u32(skb, NCSI_ATTR_IFINDEX, nr->rsp->dev->ifindex);
if (np)
nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID, np->id);
if (nc)
nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id);
else
nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, NCSI_RESERVED_CHANNEL);
rc = nla_put(skb, NCSI_ATTR_DATA, nr->rsp->len, (void *)nr->rsp->data);
if (rc)
goto err;
genlmsg_end(skb, hdr);
return genlmsg_unicast(net, skb, nr->snd_portid);
err:
kfree_skb(skb);
return rc;
}
int ncsi_send_netlink_timeout(struct ncsi_request *nr,
struct ncsi_package *np,
struct ncsi_channel *nc)
{
struct sk_buff *skb;
struct net *net;
void *hdr;
skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
hdr = genlmsg_put(skb, nr->snd_portid, nr->snd_seq,
&ncsi_genl_family, 0, NCSI_CMD_SEND_CMD);
if (!hdr) {
kfree_skb(skb);
return -EMSGSIZE;
}
net = dev_net(nr->cmd->dev);
nla_put_u32(skb, NCSI_ATTR_IFINDEX, nr->cmd->dev->ifindex);
if (np)
nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID, np->id);
else
nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID,
NCSI_PACKAGE_INDEX((((struct ncsi_pkt_hdr *)
nr->cmd->data)->channel)));
if (nc)
nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id);
else
nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, NCSI_RESERVED_CHANNEL);
genlmsg_end(skb, hdr);
return genlmsg_unicast(net, skb, nr->snd_portid);
}
int ncsi_send_netlink_err(struct net_device *dev,
u32 snd_seq,
u32 snd_portid,
const struct nlmsghdr *nlhdr,
int err)
{
struct nlmsghdr *nlh;
struct nlmsgerr *nle;
struct sk_buff *skb;
struct net *net;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
net = dev_net(dev);
nlh = nlmsg_put(skb, snd_portid, snd_seq,
NLMSG_ERROR, sizeof(*nle), 0);
nle = (struct nlmsgerr *)nlmsg_data(nlh);
nle->error = err;
memcpy(&nle->msg, nlhdr, sizeof(*nlh));
nlmsg_end(skb, nlh);
return nlmsg_unicast(net->genl_sock, skb, snd_portid);
}
static int ncsi_set_package_mask_nl(struct sk_buff *msg,
struct genl_info *info)
{
struct ncsi_dev_priv *ndp;
unsigned long flags;
int rc;
if (!info || !info->attrs)
return -EINVAL;
if (!info->attrs[NCSI_ATTR_IFINDEX])
return -EINVAL;
if (!info->attrs[NCSI_ATTR_PACKAGE_MASK])
return -EINVAL;
ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
if (!ndp)
return -ENODEV;
spin_lock_irqsave(&ndp->lock, flags);
if (nla_get_flag(info->attrs[NCSI_ATTR_MULTI_FLAG])) {
if (ndp->flags & NCSI_DEV_HWA) {
ndp->multi_package = true;
rc = 0;
} else {
netdev_err(ndp->ndev.dev,
"NCSI: Can't use multiple packages without HWA\n");
rc = -EPERM;
}
} else {
ndp->multi_package = false;
rc = 0;
}
if (!rc)
ndp->package_whitelist =
nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_MASK]);
spin_unlock_irqrestore(&ndp->lock, flags);
if (!rc) {
/* Update channel configuration */
if (!(ndp->flags & NCSI_DEV_RESET))
ncsi_reset_dev(&ndp->ndev);
}
return rc;
}
static int ncsi_set_channel_mask_nl(struct sk_buff *msg,
struct genl_info *info)
{
struct ncsi_package *np, *package;
struct ncsi_channel *nc, *channel;
u32 package_id, channel_id;
struct ncsi_dev_priv *ndp;
unsigned long flags;
if (!info || !info->attrs)
return -EINVAL;
if (!info->attrs[NCSI_ATTR_IFINDEX])
return -EINVAL;
if (!info->attrs[NCSI_ATTR_PACKAGE_ID])
return -EINVAL;
if (!info->attrs[NCSI_ATTR_CHANNEL_MASK])
return -EINVAL;
ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
if (!ndp)
return -ENODEV;
package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
package = NULL;
NCSI_FOR_EACH_PACKAGE(ndp, np)
if (np->id == package_id) {
package = np;
break;
}
if (!package)
return -ERANGE;
spin_lock_irqsave(&package->lock, flags);
channel = NULL;
if (info->attrs[NCSI_ATTR_CHANNEL_ID]) {
channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
NCSI_FOR_EACH_CHANNEL(np, nc)
if (nc->id == channel_id) {
channel = nc;
break;
}
if (!channel) {
spin_unlock_irqrestore(&package->lock, flags);
return -ERANGE;
}
netdev_dbg(ndp->ndev.dev,
"NCSI: Channel %u set as preferred channel\n",
channel->id);
}
package->channel_whitelist =
nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_MASK]);
if (package->channel_whitelist == 0)
netdev_dbg(ndp->ndev.dev,
"NCSI: Package %u set to all channels disabled\n",
package->id);
package->preferred_channel = channel;
if (nla_get_flag(info->attrs[NCSI_ATTR_MULTI_FLAG])) {
package->multi_channel = true;
netdev_info(ndp->ndev.dev,
"NCSI: Multi-channel enabled on package %u\n",
package_id);
} else {
package->multi_channel = false;
}
spin_unlock_irqrestore(&package->lock, flags);
/* Update channel configuration */
if (!(ndp->flags & NCSI_DEV_RESET))
ncsi_reset_dev(&ndp->ndev);
return 0;
}
static const struct genl_small_ops ncsi_ops[] = {
{
.cmd = NCSI_CMD_PKG_INFO,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ncsi_pkg_info_nl,
.dumpit = ncsi_pkg_info_all_nl,
.flags = 0,
},
{
.cmd = NCSI_CMD_SET_INTERFACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ncsi_set_interface_nl,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = NCSI_CMD_CLEAR_INTERFACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ncsi_clear_interface_nl,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = NCSI_CMD_SEND_CMD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ncsi_send_cmd_nl,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = NCSI_CMD_SET_PACKAGE_MASK,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ncsi_set_package_mask_nl,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = NCSI_CMD_SET_CHANNEL_MASK,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ncsi_set_channel_mask_nl,
.flags = GENL_ADMIN_PERM,
},
};
static struct genl_family ncsi_genl_family __ro_after_init = {
.name = "NCSI",
.version = 0,
.maxattr = NCSI_ATTR_MAX,
.policy = ncsi_genl_policy,
.module = THIS_MODULE,
.small_ops = ncsi_ops,
.n_small_ops = ARRAY_SIZE(ncsi_ops),
.resv_start_op = NCSI_CMD_SET_CHANNEL_MASK + 1,
};
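/* For reference: a user space client talks to this family by resolving the
* "NCSI" name through the generic netlink controller, then e.g. dumping
* NCSI_CMD_PKG_INFO with NCSI_ATTR_IFINDEX set to walk all packages and
* channels (ncsi_pkg_info_all_nl() pages through packages via cb->args[0]),
* or sending NCSI_CMD_SET_INTERFACE with NCSI_ATTR_PACKAGE_ID and an optional
* NCSI_ATTR_CHANNEL_ID to force a preferred package/channel.
*/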
static int __init ncsi_init_netlink(void)
{
return genl_register_family(&ncsi_genl_family);
}
subsys_initcall(ncsi_init_netlink);
| linux-master | net/ncsi/ncsi-netlink.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright Gavin Shan, IBM Corporation 2016.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"
static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
unsigned short payload)
{
struct ncsi_rsp_pkt_hdr *h;
u32 checksum;
__be32 *pchecksum;
/* Check the NCSI packet header. We don't need to validate
* the packet type, which should have been checked
* before calling this function.
*/
h = (struct ncsi_rsp_pkt_hdr *)skb_network_header(nr->rsp);
if (h->common.revision != NCSI_PKT_REVISION) {
netdev_dbg(nr->ndp->ndev.dev,
"NCSI: unsupported header revision\n");
return -EINVAL;
}
if (ntohs(h->common.length) != payload) {
netdev_dbg(nr->ndp->ndev.dev,
"NCSI: payload length mismatched\n");
return -EINVAL;
}
/* Check on code and reason */
if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
netdev_dbg(nr->ndp->ndev.dev,
"NCSI: non zero response/reason code %04xh, %04xh\n",
ntohs(h->code), ntohs(h->reason));
return -EPERM;
}
/* Validate the checksum, which might be all zeroes if the
* sender doesn't support checksums, as allowed by the NCSI
* specification.
*/
pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
if (ntohl(*pchecksum) == 0)
return 0;
checksum = ncsi_calculate_checksum((unsigned char *)h,
sizeof(*h) + payload - 4);
if (*pchecksum != htonl(checksum)) {
netdev_dbg(nr->ndp->ndev.dev,
"NCSI: checksum mismatched; recd: %08x calc: %08x\n",
*pchecksum, htonl(checksum));
return -EINVAL;
}
return 0;
}
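/* For reference, the checksum verified above is the one computed by
* ncsi_calculate_checksum(): the 2's complement of the sum of all 16-bit
* big-endian words of the header plus payload, excluding the 4-byte checksum
* field itself. A minimal sketch of that helper:
*
*	u32 sum = 0;
*	for (i = 0; i < len; i += 2)
*		sum += (data[i] << 8) | data[i + 1];
*	return ~sum + 1;
*/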
static int ncsi_rsp_handler_cis(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_package *np;
struct ncsi_channel *nc;
unsigned char id;
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel, &np, &nc);
if (!nc) {
if (ndp->flags & NCSI_DEV_PROBED)
return -ENXIO;
id = NCSI_CHANNEL_INDEX(rsp->rsp.common.channel);
nc = ncsi_add_channel(np, id);
}
return nc ? 0 : -ENODEV;
}
static int ncsi_rsp_handler_sp(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_package *np;
unsigned char id;
/* Add the package if it doesn't already exist. Otherwise,
* change the state of its child channels.
*/
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
&np, NULL);
if (!np) {
if (ndp->flags & NCSI_DEV_PROBED)
return -ENXIO;
id = NCSI_PACKAGE_INDEX(rsp->rsp.common.channel);
np = ncsi_add_package(ndp, id);
if (!np)
return -ENODEV;
}
return 0;
}
static int ncsi_rsp_handler_dp(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_package *np;
struct ncsi_channel *nc;
unsigned long flags;
/* Find the package */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
&np, NULL);
if (!np)
return -ENODEV;
/* Change state of all channels attached to the package */
NCSI_FOR_EACH_CHANNEL(np, nc) {
spin_lock_irqsave(&nc->lock, flags);
nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
}
return 0;
}
static int ncsi_rsp_handler_ec(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
ncm = &nc->modes[NCSI_MODE_ENABLE];
if (ncm->enable)
return 0;
ncm->enable = 1;
return 0;
}
static int ncsi_rsp_handler_dc(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
int ret;
ret = ncsi_validate_rsp_pkt(nr, 4);
if (ret)
return ret;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
ncm = &nc->modes[NCSI_MODE_ENABLE];
if (!ncm->enable)
return 0;
ncm->enable = 0;
return 0;
}
static int ncsi_rsp_handler_rc(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
unsigned long flags;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Update state for the specified channel */
spin_lock_irqsave(&nc->lock, flags);
nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
return 0;
}
static int ncsi_rsp_handler_ecnt(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
ncm = &nc->modes[NCSI_MODE_TX_ENABLE];
if (ncm->enable)
return 0;
ncm->enable = 1;
return 0;
}
static int ncsi_rsp_handler_dcnt(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
ncm = &nc->modes[NCSI_MODE_TX_ENABLE];
if (!ncm->enable)
return 0;
ncm->enable = 0;
return 0;
}
static int ncsi_rsp_handler_ae(struct ncsi_request *nr)
{
struct ncsi_cmd_ae_pkt *cmd;
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Check if the AEN has been enabled */
ncm = &nc->modes[NCSI_MODE_AEN];
if (ncm->enable)
return 0;
/* Update to AEN configuration */
cmd = (struct ncsi_cmd_ae_pkt *)skb_network_header(nr->cmd);
ncm->enable = 1;
ncm->data[0] = cmd->mc_id;
ncm->data[1] = ntohl(cmd->mode);
return 0;
}
static int ncsi_rsp_handler_sl(struct ncsi_request *nr)
{
struct ncsi_cmd_sl_pkt *cmd;
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
cmd = (struct ncsi_cmd_sl_pkt *)skb_network_header(nr->cmd);
ncm = &nc->modes[NCSI_MODE_LINK];
ncm->data[0] = ntohl(cmd->mode);
ncm->data[1] = ntohl(cmd->oem_mode);
return 0;
}
static int ncsi_rsp_handler_gls(struct ncsi_request *nr)
{
struct ncsi_rsp_gls_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
unsigned long flags;
/* Find the package and channel */
rsp = (struct ncsi_rsp_gls_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
ncm = &nc->modes[NCSI_MODE_LINK];
ncm->data[2] = ntohl(rsp->status);
ncm->data[3] = ntohl(rsp->other);
ncm->data[4] = ntohl(rsp->oem_status);
if (nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN)
return 0;
/* Reset the channel monitor if it has been enabled */
spin_lock_irqsave(&nc->lock, flags);
nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
spin_unlock_irqrestore(&nc->lock, flags);
return 0;
}
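/* Response handler for Set VLAN Filter command */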
static int ncsi_rsp_handler_svf(struct ncsi_request *nr)
{
struct ncsi_cmd_svf_pkt *cmd;
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_vlan_filter *ncf;
unsigned long flags;
void *bitmap;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
cmd = (struct ncsi_cmd_svf_pkt *)skb_network_header(nr->cmd);
ncf = &nc->vlan_filter;
if (cmd->index == 0 || cmd->index > ncf->n_vids)
return -ERANGE;
/* Add or remove the VLAN filter. Remember that HW indexes start from 1 */
spin_lock_irqsave(&nc->lock, flags);
bitmap = &ncf->bitmap;
if (!(cmd->enable & 0x1)) {
if (test_and_clear_bit(cmd->index - 1, bitmap))
ncf->vids[cmd->index - 1] = 0;
} else {
set_bit(cmd->index - 1, bitmap);
ncf->vids[cmd->index - 1] = ntohs(cmd->vlan);
}
spin_unlock_irqrestore(&nc->lock, flags);
return 0;
}
static int ncsi_rsp_handler_ev(struct ncsi_request *nr)
{
struct ncsi_cmd_ev_pkt *cmd;
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Check if VLAN mode has been enabled */
ncm = &nc->modes[NCSI_MODE_VLAN];
if (ncm->enable)
return 0;
/* Update to VLAN mode */
cmd = (struct ncsi_cmd_ev_pkt *)skb_network_header(nr->cmd);
ncm->enable = 1;
ncm->data[0] = ntohl((__force __be32)cmd->mode);
return 0;
}
static int ncsi_rsp_handler_dv(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Check if VLAN mode has been enabled */
ncm = &nc->modes[NCSI_MODE_VLAN];
if (!ncm->enable)
return 0;
/* Update to VLAN mode */
ncm->enable = 0;
return 0;
}
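/* Response handler for Set MAC Address command */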
static int ncsi_rsp_handler_sma(struct ncsi_request *nr)
{
struct ncsi_cmd_sma_pkt *cmd;
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mac_filter *ncf;
unsigned long flags;
void *bitmap;
bool enabled;
int index;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* According to NCSI spec 1.01, the mixed filter table
* isn't supported yet.
*/
cmd = (struct ncsi_cmd_sma_pkt *)skb_network_header(nr->cmd);
enabled = cmd->at_e & 0x1;
ncf = &nc->mac_filter;
bitmap = &ncf->bitmap;
if (cmd->index == 0 ||
cmd->index > ncf->n_uc + ncf->n_mc + ncf->n_mixed)
return -ERANGE;
index = (cmd->index - 1) * ETH_ALEN;
spin_lock_irqsave(&nc->lock, flags);
if (enabled) {
set_bit(cmd->index - 1, bitmap);
memcpy(&ncf->addrs[index], cmd->mac, ETH_ALEN);
} else {
clear_bit(cmd->index - 1, bitmap);
eth_zero_addr(&ncf->addrs[index]);
}
spin_unlock_irqrestore(&nc->lock, flags);
return 0;
}
static int ncsi_rsp_handler_ebf(struct ncsi_request *nr)
{
struct ncsi_cmd_ebf_pkt *cmd;
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the package and channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel, NULL, &nc);
if (!nc)
return -ENODEV;
/* Check if broadcast filter has been enabled */
ncm = &nc->modes[NCSI_MODE_BC];
if (ncm->enable)
return 0;
/* Update to broadcast filter mode */
cmd = (struct ncsi_cmd_ebf_pkt *)skb_network_header(nr->cmd);
ncm->enable = 1;
ncm->data[0] = ntohl(cmd->mode);
return 0;
}
static int ncsi_rsp_handler_dbf(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Check if broadcast filter is already disabled */
ncm = &nc->modes[NCSI_MODE_BC];
if (!ncm->enable)
return 0;
/* Update to broadcast filter mode */
ncm->enable = 0;
ncm->data[0] = 0;
return 0;
}
static int ncsi_rsp_handler_egmf(struct ncsi_request *nr)
{
struct ncsi_cmd_egmf_pkt *cmd;
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Check if multicast filter has been enabled */
ncm = &nc->modes[NCSI_MODE_MC];
if (ncm->enable)
return 0;
/* Update to multicast filter mode */
cmd = (struct ncsi_cmd_egmf_pkt *)skb_network_header(nr->cmd);
ncm->enable = 1;
ncm->data[0] = ntohl(cmd->mode);
return 0;
}
static int ncsi_rsp_handler_dgmf(struct ncsi_request *nr)
{
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Check if multicast filter has been enabled */
ncm = &nc->modes[NCSI_MODE_MC];
if (!ncm->enable)
return 0;
/* Update to multicast filter mode */
ncm->enable = 0;
ncm->data[0] = 0;
return 0;
}
static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
{
struct ncsi_cmd_snfc_pkt *cmd;
struct ncsi_rsp_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
/* Find the channel */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Check if flow control has been enabled */
ncm = &nc->modes[NCSI_MODE_FC];
if (ncm->enable)
return 0;
/* Update to flow control mode */
cmd = (struct ncsi_cmd_snfc_pkt *)skb_network_header(nr->cmd);
ncm->enable = 1;
ncm->data[0] = cmd->mode;
return 0;
}
/* Response handler for Get MAC Address command */
static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
{
struct ncsi_dev_priv *ndp = nr->ndp;
struct net_device *ndev = ndp->ndev.dev;
struct ncsi_rsp_oem_pkt *rsp;
struct sockaddr saddr;
u32 mac_addr_off = 0;
int ret = 0;
/* Get the response header */
rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
saddr.sa_family = ndev->type;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
if (mfr_id == NCSI_OEM_MFR_BCM_ID)
mac_addr_off = BCM_MAC_ADDR_OFFSET;
else if (mfr_id == NCSI_OEM_MFR_MLX_ID)
mac_addr_off = MLX_MAC_ADDR_OFFSET;
else if (mfr_id == NCSI_OEM_MFR_INTEL_ID)
mac_addr_off = INTEL_MAC_ADDR_OFFSET;
memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID)
eth_addr_inc((u8 *)saddr.sa_data);
if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
return -ENXIO;
/* Set the flag noting the GMA command has run; it should only be issued once */
ndp->gma_flag = 1;
rtnl_lock();
ret = dev_set_mac_address(ndev, &saddr, NULL);
rtnl_unlock();
if (ret < 0)
netdev_warn(ndev, "NCSI: Writing MAC address to device failed\n");
return ret;
}
/* Response handler for Mellanox card */
static int ncsi_rsp_handler_oem_mlx(struct ncsi_request *nr)
{
struct ncsi_rsp_oem_mlx_pkt *mlx;
struct ncsi_rsp_oem_pkt *rsp;
/* Get the response header */
rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
mlx = (struct ncsi_rsp_oem_mlx_pkt *)(rsp->data);
if (mlx->cmd == NCSI_OEM_MLX_CMD_GMA &&
mlx->param == NCSI_OEM_MLX_CMD_GMA_PARAM)
return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_MLX_ID);
return 0;
}
/* Response handler for Broadcom card */
static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
{
struct ncsi_rsp_oem_bcm_pkt *bcm;
struct ncsi_rsp_oem_pkt *rsp;
/* Get the response header */
rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
bcm = (struct ncsi_rsp_oem_bcm_pkt *)(rsp->data);
if (bcm->type == NCSI_OEM_BCM_CMD_GMA)
return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_BCM_ID);
return 0;
}
/* Response handler for Intel card */
static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr)
{
struct ncsi_rsp_oem_intel_pkt *intel;
struct ncsi_rsp_oem_pkt *rsp;
/* Get the response header */
rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
intel = (struct ncsi_rsp_oem_intel_pkt *)(rsp->data);
if (intel->cmd == NCSI_OEM_INTEL_CMD_GMA)
return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_INTEL_ID);
return 0;
}
static struct ncsi_rsp_oem_handler {
unsigned int mfr_id;
int (*handler)(struct ncsi_request *nr);
} ncsi_rsp_oem_handlers[] = {
{ NCSI_OEM_MFR_MLX_ID, ncsi_rsp_handler_oem_mlx },
{ NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm },
{ NCSI_OEM_MFR_INTEL_ID, ncsi_rsp_handler_oem_intel }
};
/* Response handler for OEM command */
static int ncsi_rsp_handler_oem(struct ncsi_request *nr)
{
struct ncsi_rsp_oem_handler *nrh = NULL;
struct ncsi_rsp_oem_pkt *rsp;
unsigned int mfr_id, i;
/* Get the response header */
rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
mfr_id = ntohl(rsp->mfr_id);
/* Check the manufacturer ID and find the handler */
for (i = 0; i < ARRAY_SIZE(ncsi_rsp_oem_handlers); i++) {
if (ncsi_rsp_oem_handlers[i].mfr_id == mfr_id) {
if (ncsi_rsp_oem_handlers[i].handler)
nrh = &ncsi_rsp_oem_handlers[i];
else
nrh = NULL;
break;
}
}
if (!nrh) {
netdev_err(nr->ndp->ndev.dev, "Received unrecognized OEM packet with MFR-ID (0x%x)\n",
mfr_id);
return -ENOENT;
}
/* Process the packet */
return nrh->handler(nr);
}
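/* Response handler for Get Version ID command */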
static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
{
struct ncsi_rsp_gvi_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_version *ncv;
int i;
/* Find the channel */
rsp = (struct ncsi_rsp_gvi_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Update to channel's version info */
ncv = &nc->version;
ncv->version = ntohl(rsp->ncsi_version);
ncv->alpha2 = rsp->alpha2;
memcpy(ncv->fw_name, rsp->fw_name, 12);
ncv->fw_version = ntohl(rsp->fw_version);
for (i = 0; i < ARRAY_SIZE(ncv->pci_ids); i++)
ncv->pci_ids[i] = ntohs(rsp->pci_ids[i]);
ncv->mf_id = ntohl(rsp->mf_id);
return 0;
}
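/* Response handler for Get Capabilities command */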
static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
{
struct ncsi_rsp_gc_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
size_t size;
/* Find the channel */
rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Update channel's capabilities */
nc->caps[NCSI_CAP_GENERIC].cap = ntohl(rsp->cap) &
NCSI_CAP_GENERIC_MASK;
nc->caps[NCSI_CAP_BC].cap = ntohl(rsp->bc_cap) &
NCSI_CAP_BC_MASK;
nc->caps[NCSI_CAP_MC].cap = ntohl(rsp->mc_cap) &
NCSI_CAP_MC_MASK;
nc->caps[NCSI_CAP_BUFFER].cap = ntohl(rsp->buf_cap);
nc->caps[NCSI_CAP_AEN].cap = ntohl(rsp->aen_cap) &
NCSI_CAP_AEN_MASK;
nc->caps[NCSI_CAP_VLAN].cap = rsp->vlan_mode &
NCSI_CAP_VLAN_MASK;
size = (rsp->uc_cnt + rsp->mc_cnt + rsp->mixed_cnt) * ETH_ALEN;
nc->mac_filter.addrs = kzalloc(size, GFP_ATOMIC);
if (!nc->mac_filter.addrs)
return -ENOMEM;
nc->mac_filter.n_uc = rsp->uc_cnt;
nc->mac_filter.n_mc = rsp->mc_cnt;
nc->mac_filter.n_mixed = rsp->mixed_cnt;
nc->vlan_filter.vids = kcalloc(rsp->vlan_cnt,
sizeof(*nc->vlan_filter.vids),
GFP_ATOMIC);
if (!nc->vlan_filter.vids)
return -ENOMEM;
/* Set VLAN filters active so they are cleared in the first
* configuration state
*/
nc->vlan_filter.bitmap = U64_MAX;
nc->vlan_filter.n_vids = rsp->vlan_cnt;
return 0;
}
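/* Response handler for Get Parameters command */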
static int ncsi_rsp_handler_gp(struct ncsi_request *nr)
{
struct ncsi_channel_vlan_filter *ncvf;
struct ncsi_channel_mac_filter *ncmf;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_rsp_gp_pkt *rsp;
struct ncsi_channel *nc;
unsigned short enable;
unsigned char *pdata;
unsigned long flags;
void *bitmap;
int i;
/* Find the channel */
rsp = (struct ncsi_rsp_gp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Modes with explicit enabled indications */
if (ntohl(rsp->valid_modes) & 0x1) { /* BC filter mode */
nc->modes[NCSI_MODE_BC].enable = 1;
nc->modes[NCSI_MODE_BC].data[0] = ntohl(rsp->bc_mode);
}
if (ntohl(rsp->valid_modes) & 0x2) /* Channel enabled */
nc->modes[NCSI_MODE_ENABLE].enable = 1;
if (ntohl(rsp->valid_modes) & 0x4) /* Channel Tx enabled */
nc->modes[NCSI_MODE_TX_ENABLE].enable = 1;
if (ntohl(rsp->valid_modes) & 0x8) /* MC filter mode */
nc->modes[NCSI_MODE_MC].enable = 1;
/* Modes without explicit enabled indications */
nc->modes[NCSI_MODE_LINK].enable = 1;
nc->modes[NCSI_MODE_LINK].data[0] = ntohl(rsp->link_mode);
nc->modes[NCSI_MODE_VLAN].enable = 1;
nc->modes[NCSI_MODE_VLAN].data[0] = rsp->vlan_mode;
nc->modes[NCSI_MODE_FC].enable = 1;
nc->modes[NCSI_MODE_FC].data[0] = rsp->fc_mode;
nc->modes[NCSI_MODE_AEN].enable = 1;
nc->modes[NCSI_MODE_AEN].data[0] = ntohl(rsp->aen_mode);
/* MAC addresses filter table */
pdata = (unsigned char *)rsp + 48;
enable = rsp->mac_enable;
ncmf = &nc->mac_filter;
spin_lock_irqsave(&nc->lock, flags);
bitmap = &ncmf->bitmap;
for (i = 0; i < rsp->mac_cnt; i++, pdata += 6) {
if (!(enable & (0x1 << i)))
clear_bit(i, bitmap);
else
set_bit(i, bitmap);
memcpy(&ncmf->addrs[i * ETH_ALEN], pdata, ETH_ALEN);
}
spin_unlock_irqrestore(&nc->lock, flags);
/* VLAN filter table */
enable = ntohs(rsp->vlan_enable);
ncvf = &nc->vlan_filter;
bitmap = &ncvf->bitmap;
spin_lock_irqsave(&nc->lock, flags);
for (i = 0; i < rsp->vlan_cnt; i++, pdata += 2) {
if (!(enable & (0x1 << i)))
clear_bit(i, bitmap);
else
set_bit(i, bitmap);
ncvf->vids[i] = ntohs(*(__be16 *)pdata);
}
spin_unlock_irqrestore(&nc->lock, flags);
return 0;
}
static int ncsi_rsp_handler_gcps(struct ncsi_request *nr)
{
struct ncsi_rsp_gcps_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_stats *ncs;
/* Find the channel */
rsp = (struct ncsi_rsp_gcps_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Update HNC's statistics */
ncs = &nc->stats;
ncs->hnc_cnt_hi = ntohl(rsp->cnt_hi);
ncs->hnc_cnt_lo = ntohl(rsp->cnt_lo);
ncs->hnc_rx_bytes = ntohl(rsp->rx_bytes);
ncs->hnc_tx_bytes = ntohl(rsp->tx_bytes);
ncs->hnc_rx_uc_pkts = ntohl(rsp->rx_uc_pkts);
ncs->hnc_rx_mc_pkts = ntohl(rsp->rx_mc_pkts);
ncs->hnc_rx_bc_pkts = ntohl(rsp->rx_bc_pkts);
ncs->hnc_tx_uc_pkts = ntohl(rsp->tx_uc_pkts);
ncs->hnc_tx_mc_pkts = ntohl(rsp->tx_mc_pkts);
ncs->hnc_tx_bc_pkts = ntohl(rsp->tx_bc_pkts);
ncs->hnc_fcs_err = ntohl(rsp->fcs_err);
ncs->hnc_align_err = ntohl(rsp->align_err);
ncs->hnc_false_carrier = ntohl(rsp->false_carrier);
ncs->hnc_runt_pkts = ntohl(rsp->runt_pkts);
ncs->hnc_jabber_pkts = ntohl(rsp->jabber_pkts);
ncs->hnc_rx_pause_xon = ntohl(rsp->rx_pause_xon);
ncs->hnc_rx_pause_xoff = ntohl(rsp->rx_pause_xoff);
ncs->hnc_tx_pause_xon = ntohl(rsp->tx_pause_xon);
ncs->hnc_tx_pause_xoff = ntohl(rsp->tx_pause_xoff);
ncs->hnc_tx_s_collision = ntohl(rsp->tx_s_collision);
ncs->hnc_tx_m_collision = ntohl(rsp->tx_m_collision);
ncs->hnc_l_collision = ntohl(rsp->l_collision);
ncs->hnc_e_collision = ntohl(rsp->e_collision);
ncs->hnc_rx_ctl_frames = ntohl(rsp->rx_ctl_frames);
ncs->hnc_rx_64_frames = ntohl(rsp->rx_64_frames);
ncs->hnc_rx_127_frames = ntohl(rsp->rx_127_frames);
ncs->hnc_rx_255_frames = ntohl(rsp->rx_255_frames);
ncs->hnc_rx_511_frames = ntohl(rsp->rx_511_frames);
ncs->hnc_rx_1023_frames = ntohl(rsp->rx_1023_frames);
ncs->hnc_rx_1522_frames = ntohl(rsp->rx_1522_frames);
ncs->hnc_rx_9022_frames = ntohl(rsp->rx_9022_frames);
ncs->hnc_tx_64_frames = ntohl(rsp->tx_64_frames);
ncs->hnc_tx_127_frames = ntohl(rsp->tx_127_frames);
ncs->hnc_tx_255_frames = ntohl(rsp->tx_255_frames);
ncs->hnc_tx_511_frames = ntohl(rsp->tx_511_frames);
ncs->hnc_tx_1023_frames = ntohl(rsp->tx_1023_frames);
ncs->hnc_tx_1522_frames = ntohl(rsp->tx_1522_frames);
ncs->hnc_tx_9022_frames = ntohl(rsp->tx_9022_frames);
ncs->hnc_rx_valid_bytes = ntohl(rsp->rx_valid_bytes);
ncs->hnc_rx_runt_pkts = ntohl(rsp->rx_runt_pkts);
ncs->hnc_rx_jabber_pkts = ntohl(rsp->rx_jabber_pkts);
return 0;
}
static int ncsi_rsp_handler_gns(struct ncsi_request *nr)
{
struct ncsi_rsp_gns_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_stats *ncs;
/* Find the channel */
rsp = (struct ncsi_rsp_gns_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Update the NCSI controller's statistics */
ncs = &nc->stats;
ncs->ncsi_rx_cmds = ntohl(rsp->rx_cmds);
ncs->ncsi_dropped_cmds = ntohl(rsp->dropped_cmds);
ncs->ncsi_cmd_type_errs = ntohl(rsp->cmd_type_errs);
ncs->ncsi_cmd_csum_errs = ntohl(rsp->cmd_csum_errs);
ncs->ncsi_rx_pkts = ntohl(rsp->rx_pkts);
ncs->ncsi_tx_pkts = ntohl(rsp->tx_pkts);
ncs->ncsi_tx_aen_pkts = ntohl(rsp->tx_aen_pkts);
return 0;
}
static int ncsi_rsp_handler_gnpts(struct ncsi_request *nr)
{
struct ncsi_rsp_gnpts_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
struct ncsi_channel_stats *ncs;
/* Find the channel */
rsp = (struct ncsi_rsp_gnpts_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
NULL, &nc);
if (!nc)
return -ENODEV;
/* Update the pass-through statistics */
ncs = &nc->stats;
ncs->pt_tx_pkts = ntohl(rsp->tx_pkts);
ncs->pt_tx_dropped = ntohl(rsp->tx_dropped);
ncs->pt_tx_channel_err = ntohl(rsp->tx_channel_err);
ncs->pt_tx_us_err = ntohl(rsp->tx_us_err);
ncs->pt_rx_pkts = ntohl(rsp->rx_pkts);
ncs->pt_rx_dropped = ntohl(rsp->rx_dropped);
ncs->pt_rx_channel_err = ntohl(rsp->rx_channel_err);
ncs->pt_rx_us_err = ntohl(rsp->rx_us_err);
ncs->pt_rx_os_err = ntohl(rsp->rx_os_err);
return 0;
}
static int ncsi_rsp_handler_gps(struct ncsi_request *nr)
{
struct ncsi_rsp_gps_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_package *np;
/* Find the package */
rsp = (struct ncsi_rsp_gps_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
&np, NULL);
if (!np)
return -ENODEV;
return 0;
}
static int ncsi_rsp_handler_gpuuid(struct ncsi_request *nr)
{
struct ncsi_rsp_gpuuid_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_package *np;
/* Find the package */
rsp = (struct ncsi_rsp_gpuuid_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
&np, NULL);
if (!np)
return -ENODEV;
memcpy(np->uuid, rsp->uuid, sizeof(rsp->uuid));
return 0;
}
static int ncsi_rsp_handler_pldm(struct ncsi_request *nr)
{
return 0;
}
static int ncsi_rsp_handler_netlink(struct ncsi_request *nr)
{
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_rsp_pkt *rsp;
struct ncsi_package *np;
struct ncsi_channel *nc;
int ret;
/* Find the package */
rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
&np, &nc);
if (!np)
return -ENODEV;
ret = ncsi_send_netlink_rsp(nr, np, nc);
return ret;
}
static struct ncsi_rsp_handler {
unsigned char type;
int payload;
int (*handler)(struct ncsi_request *nr);
} ncsi_rsp_handlers[] = {
{ NCSI_PKT_RSP_CIS, 4, ncsi_rsp_handler_cis },
{ NCSI_PKT_RSP_SP, 4, ncsi_rsp_handler_sp },
{ NCSI_PKT_RSP_DP, 4, ncsi_rsp_handler_dp },
{ NCSI_PKT_RSP_EC, 4, ncsi_rsp_handler_ec },
{ NCSI_PKT_RSP_DC, 4, ncsi_rsp_handler_dc },
{ NCSI_PKT_RSP_RC, 4, ncsi_rsp_handler_rc },
{ NCSI_PKT_RSP_ECNT, 4, ncsi_rsp_handler_ecnt },
{ NCSI_PKT_RSP_DCNT, 4, ncsi_rsp_handler_dcnt },
{ NCSI_PKT_RSP_AE, 4, ncsi_rsp_handler_ae },
{ NCSI_PKT_RSP_SL, 4, ncsi_rsp_handler_sl },
{ NCSI_PKT_RSP_GLS, 16, ncsi_rsp_handler_gls },
{ NCSI_PKT_RSP_SVF, 4, ncsi_rsp_handler_svf },
{ NCSI_PKT_RSP_EV, 4, ncsi_rsp_handler_ev },
{ NCSI_PKT_RSP_DV, 4, ncsi_rsp_handler_dv },
{ NCSI_PKT_RSP_SMA, 4, ncsi_rsp_handler_sma },
{ NCSI_PKT_RSP_EBF, 4, ncsi_rsp_handler_ebf },
{ NCSI_PKT_RSP_DBF, 4, ncsi_rsp_handler_dbf },
{ NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf },
{ NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf },
{ NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc },
{ NCSI_PKT_RSP_GVI, 40, ncsi_rsp_handler_gvi },
{ NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc },
{ NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp },
{ NCSI_PKT_RSP_GCPS, 204, ncsi_rsp_handler_gcps },
{ NCSI_PKT_RSP_GNS, 32, ncsi_rsp_handler_gns },
{ NCSI_PKT_RSP_GNPTS, 48, ncsi_rsp_handler_gnpts },
{ NCSI_PKT_RSP_GPS, 8, ncsi_rsp_handler_gps },
{ NCSI_PKT_RSP_OEM, -1, ncsi_rsp_handler_oem },
{ NCSI_PKT_RSP_PLDM, -1, ncsi_rsp_handler_pldm },
{ NCSI_PKT_RSP_GPUUID, 20, ncsi_rsp_handler_gpuuid },
{ NCSI_PKT_RSP_QPNPR, -1, ncsi_rsp_handler_pldm },
{ NCSI_PKT_RSP_SNPR, -1, ncsi_rsp_handler_pldm }
};
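/* Receive handler for NCSI response packets: AEN packets are dispatched
 * to the AEN handler; otherwise the response is matched to its pending
 * request, validated and passed to the type-specific handler.
 */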
int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct ncsi_rsp_handler *nrh = NULL;
struct ncsi_dev *nd;
struct ncsi_dev_priv *ndp;
struct ncsi_request *nr;
struct ncsi_pkt_hdr *hdr;
unsigned long flags;
int payload, i, ret;
/* Find the NCSI device */
nd = ncsi_find_dev(orig_dev);
ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
if (!ndp)
return -ENODEV;
/* Check if it is AEN packet */
hdr = (struct ncsi_pkt_hdr *)skb_network_header(skb);
if (hdr->type == NCSI_PKT_AEN)
return ncsi_aen_handler(ndp, skb);
/* Find the handler */
for (i = 0; i < ARRAY_SIZE(ncsi_rsp_handlers); i++) {
if (ncsi_rsp_handlers[i].type == hdr->type) {
if (ncsi_rsp_handlers[i].handler)
nrh = &ncsi_rsp_handlers[i];
else
nrh = NULL;
break;
}
}
if (!nrh) {
netdev_err(nd->dev, "Received unrecognized packet (0x%x)\n",
hdr->type);
return -ENOENT;
}
/* Associate with the request */
spin_lock_irqsave(&ndp->lock, flags);
nr = &ndp->requests[hdr->id];
if (!nr->used) {
spin_unlock_irqrestore(&ndp->lock, flags);
return -ENODEV;
}
nr->rsp = skb;
if (!nr->enabled) {
spin_unlock_irqrestore(&ndp->lock, flags);
ret = -ENOENT;
goto out;
}
/* Validate the packet */
spin_unlock_irqrestore(&ndp->lock, flags);
payload = nrh->payload;
if (payload < 0)
payload = ntohs(hdr->length);
ret = ncsi_validate_rsp_pkt(nr, payload);
if (ret) {
netdev_warn(ndp->ndev.dev,
"NCSI: 'bad' packet ignored for type 0x%x\n",
hdr->type);
if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
if (ret == -EPERM)
goto out_netlink;
else
ncsi_send_netlink_err(ndp->ndev.dev,
nr->snd_seq,
nr->snd_portid,
&nr->nlhdr,
ret);
}
goto out;
}
/* Process the packet */
ret = nrh->handler(nr);
if (ret)
netdev_err(ndp->ndev.dev,
"NCSI: Handler for packet type 0x%x returned %d\n",
hdr->type, ret);
out_netlink:
if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
ret = ncsi_rsp_handler_netlink(nr);
if (ret) {
netdev_err(ndp->ndev.dev,
"NCSI: Netlink handler for packet type 0x%x returned %d\n",
hdr->type, ret);
}
}
out:
ncsi_free_request(nr);
return ret;
}
| linux-master | net/ncsi/ncsi-rsp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright Gavin Shan, IBM Corporation 2016.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
static const int padding_bytes = 26;
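/* NCSI checksum: 2's complement of the sum of the packet's 16-bit
 * big-endian words.
 */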
u32 ncsi_calculate_checksum(unsigned char *data, int len)
{
u32 checksum = 0;
int i;
for (i = 0; i < len; i += 2)
checksum += (((u32)data[i] << 8) | data[i + 1]);
checksum = (~checksum + 1);
return checksum;
}
/* This function should be called after the data area has been
* populated completely.
*/
static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h,
struct ncsi_cmd_arg *nca)
{
u32 checksum;
__be32 *pchecksum;
h->mc_id = 0;
h->revision = NCSI_PKT_REVISION;
h->reserved = 0;
h->id = nca->id;
h->type = nca->type;
h->channel = NCSI_TO_CHANNEL(nca->package,
nca->channel);
h->length = htons(nca->payload);
h->reserved1[0] = 0;
h->reserved1[1] = 0;
/* Fill with calculated checksum */
checksum = ncsi_calculate_checksum((unsigned char *)h,
sizeof(*h) + nca->payload);
pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
ALIGN(nca->payload, 4));
*pchecksum = htonl(checksum);
}
static int ncsi_cmd_handler_default(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_sp(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_sp_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->hw_arbitration = nca->bytes[0];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_dc(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_dc_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->ald = nca->bytes[0];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_rc(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_rc_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_ae(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_ae_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mc_id = nca->bytes[0];
cmd->mode = htonl(nca->dwords[1]);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_sl(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_sl_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = htonl(nca->dwords[0]);
cmd->oem_mode = htonl(nca->dwords[1]);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_svf(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_svf_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->vlan = htons(nca->words[1]);
cmd->index = nca->bytes[6];
cmd->enable = nca->bytes[7];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_ev(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_ev_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = nca->bytes[3];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_sma(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_sma_pkt *cmd;
int i;
cmd = skb_put_zero(skb, sizeof(*cmd));
for (i = 0; i < 6; i++)
cmd->mac[i] = nca->bytes[i];
cmd->index = nca->bytes[6];
cmd->at_e = nca->bytes[7];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_ebf(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_ebf_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = htonl(nca->dwords[0]);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_egmf(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_egmf_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = htonl(nca->dwords[0]);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_snfc(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_snfc_pkt *cmd;
cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = nca->bytes[0];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static int ncsi_cmd_handler_oem(struct sk_buff *skb,
struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_oem_pkt *cmd;
unsigned int len;
int payload;
/* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2 requires the payload
 * to be padded with zeros to a 32-bit boundary before the checksum
 * field. Ensure the padding bytes are accounted for in the skb
 * allocation.
 */
payload = ALIGN(nca->payload, 4);
len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
len += max(payload, padding_bytes);
cmd = skb_put_zero(skb, len);
unsafe_memcpy(&cmd->mfr_id, nca->data, nca->payload,
/* skb allocated with enough to load the payload */);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
}
static struct ncsi_cmd_handler {
unsigned char type;
int payload;
int (*handler)(struct sk_buff *skb,
struct ncsi_cmd_arg *nca);
} ncsi_cmd_handlers[] = {
{ NCSI_PKT_CMD_CIS, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_SP, 4, ncsi_cmd_handler_sp },
{ NCSI_PKT_CMD_DP, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_EC, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_DC, 4, ncsi_cmd_handler_dc },
{ NCSI_PKT_CMD_RC, 4, ncsi_cmd_handler_rc },
{ NCSI_PKT_CMD_ECNT, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_DCNT, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_AE, 8, ncsi_cmd_handler_ae },
{ NCSI_PKT_CMD_SL, 8, ncsi_cmd_handler_sl },
{ NCSI_PKT_CMD_GLS, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_SVF, 8, ncsi_cmd_handler_svf },
{ NCSI_PKT_CMD_EV, 4, ncsi_cmd_handler_ev },
{ NCSI_PKT_CMD_DV, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_SMA, 8, ncsi_cmd_handler_sma },
{ NCSI_PKT_CMD_EBF, 4, ncsi_cmd_handler_ebf },
{ NCSI_PKT_CMD_DBF, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_EGMF, 4, ncsi_cmd_handler_egmf },
{ NCSI_PKT_CMD_DGMF, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_SNFC, 4, ncsi_cmd_handler_snfc },
{ NCSI_PKT_CMD_GVI, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_GC, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_GP, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_GCPS, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_GNS, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_GNPTS, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_GPS, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_OEM, -1, ncsi_cmd_handler_oem },
{ NCSI_PKT_CMD_PLDM, 0, NULL },
{ NCSI_PKT_CMD_GPUUID, 0, ncsi_cmd_handler_default }
};
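/* Allocate a request and an skb large enough for the command header,
 * the padded payload and the checksum, plus the device's link-layer
 * headroom and tailroom.
 */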
static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
{
struct ncsi_dev_priv *ndp = nca->ndp;
struct ncsi_dev *nd = &ndp->ndev;
struct net_device *dev = nd->dev;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
int payload;
int len = hlen + tlen;
struct sk_buff *skb;
struct ncsi_request *nr;
nr = ncsi_alloc_request(ndp, nca->req_flags);
if (!nr)
return NULL;
/* An NCSI command packet has a 16-byte header, the payload and a
 * 4-byte checksum. The payload needs padding so that the checksum
 * field following it is aligned to a 32-bit boundary.
 * The packet also needs padding if its payload is less than 26 bytes,
 * to meet the 64-byte minimum Ethernet frame length.
 */
len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
payload = ALIGN(nca->payload, 4);
len += max(payload, padding_bytes);
/* Allocate skb */
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
ncsi_free_request(nr);
return NULL;
}
nr->cmd = skb;
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(ETH_P_NCSI);
return nr;
}
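/* Build and transmit an NCSI command: find the per-type handler to fill
 * in the payload, prepend the Ethernet header and arm the response timer
 * before queueing the packet.
 */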
int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
{
struct ncsi_cmd_handler *nch = NULL;
struct ncsi_request *nr;
unsigned char type;
struct ethhdr *eh;
int i, ret;
/* Use OEM generic handler for Netlink request */
if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
type = NCSI_PKT_CMD_OEM;
else
type = nca->type;
/* Search for the handler */
for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
if (ncsi_cmd_handlers[i].type == type) {
if (ncsi_cmd_handlers[i].handler)
nch = &ncsi_cmd_handlers[i];
else
nch = NULL;
break;
}
}
if (!nch) {
netdev_err(nca->ndp->ndev.dev,
"Cannot send packet with type 0x%02x\n", nca->type);
return -ENOENT;
}
/* Get the packet payload length and allocate the request.
 * A negative length in the handler structure means the caller
 * is expected to have set the payload length in nca before
 * calling this function.
 */
if (nch->payload >= 0)
nca->payload = nch->payload;
nr = ncsi_alloc_command(nca);
if (!nr)
return -ENOMEM;
/* track netlink information */
if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
nr->snd_seq = nca->info->snd_seq;
nr->snd_portid = nca->info->snd_portid;
nr->nlhdr = *nca->info->nlhdr;
}
/* Prepare the packet */
nca->id = nr->id;
ret = nch->handler(nr->cmd, nca);
if (ret) {
ncsi_free_request(nr);
return ret;
}
/* Fill the ethernet header */
eh = skb_push(nr->cmd, sizeof(*eh));
eh->h_proto = htons(ETH_P_NCSI);
eth_broadcast_addr(eh->h_dest);
/* If a MAC address has been received from the device, use it as
 * the unicast source address; otherwise fall back to the
 * broadcast address.
 */
if (nca->ndp->gma_flag == 1)
memcpy(eh->h_source, nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
else
eth_broadcast_addr(eh->h_source);
/* Start the timer for the request, which might not get a
 * corresponding response. Given that NCSI is an internal
 * connection, a 1 second delay should be sufficient.
 */
nr->enabled = true;
mod_timer(&nr->timer, jiffies + 1 * HZ);
/* Send NCSI packet */
skb_get(nr->cmd);
ret = dev_queue_xmit(nr->cmd);
if (ret < 0) {
ncsi_free_request(nr);
return ret;
}
return 0;
}
| linux-master | net/ncsi/ncsi-cmd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright Gavin Shan, IBM Corporation 2016.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include "internal.h"
#include "ncsi-pkt.h"
static int ncsi_validate_aen_pkt(struct ncsi_aen_pkt_hdr *h,
const unsigned short payload)
{
u32 checksum;
__be32 *pchecksum;
if (h->common.revision != NCSI_PKT_REVISION)
return -EINVAL;
if (ntohs(h->common.length) != payload)
return -EINVAL;
/* Validate the checksum, which might be zero if the sender
 * doesn't support checksums, as the NCSI specification allows.
 */
pchecksum = (__be32 *)((void *)(h + 1) + payload - 4);
if (ntohl(*pchecksum) == 0)
return 0;
checksum = ncsi_calculate_checksum((unsigned char *)h,
sizeof(*h) + payload - 4);
if (*pchecksum != htonl(checksum))
return -EINVAL;
return 0;
}
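/* Handler for the Link Status Change AEN: update the cached link state
 * and, on a link transition, reshuffle or fail over the Tx channel as
 * needed.
 */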
static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
struct ncsi_aen_pkt_hdr *h)
{
struct ncsi_channel *nc, *tmp;
struct ncsi_channel_mode *ncm;
unsigned long old_data, data;
struct ncsi_aen_lsc_pkt *lsc;
struct ncsi_package *np;
bool had_link, has_link;
unsigned long flags;
bool chained;
int state;
/* Find the NCSI channel */
ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc);
if (!nc)
return -ENODEV;
/* Update the link status */
lsc = (struct ncsi_aen_lsc_pkt *)h;
spin_lock_irqsave(&nc->lock, flags);
ncm = &nc->modes[NCSI_MODE_LINK];
old_data = ncm->data[2];
data = ntohl(lsc->status);
ncm->data[2] = data;
ncm->data[4] = ntohl(lsc->oem_status);
had_link = !!(old_data & 0x1);
has_link = !!(data & 0x1);
netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
nc->id, data & 0x1 ? "up" : "down");
chained = !list_empty(&nc->link);
state = nc->state;
spin_unlock_irqrestore(&nc->lock, flags);
if (state == NCSI_CHANNEL_INACTIVE)
netdev_warn(ndp->ndev.dev,
"NCSI: Inactive channel %u received AEN!\n",
nc->id);
if ((had_link == has_link) || chained)
return 0;
if (had_link)
netif_carrier_off(ndp->ndev.dev);
else
netif_carrier_on(ndp->ndev.dev);
if (!ndp->multi_package && !nc->package->multi_channel) {
if (had_link) {
ndp->flags |= NCSI_DEV_RESHUFFLE;
ncsi_stop_channel_monitor(nc);
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
return ncsi_process_next_channel(ndp);
}
/* Configured channel came up */
return 0;
}
if (had_link) {
ncm = &nc->modes[NCSI_MODE_TX_ENABLE];
if (ncsi_channel_is_last(ndp, nc)) {
/* No channels left, reconfigure */
return ncsi_reset_dev(&ndp->ndev);
} else if (ncm->enable) {
/* Need to failover Tx channel */
ncsi_update_tx_channel(ndp, nc->package, nc, NULL);
}
} else if (has_link && nc->package->preferred_channel == nc) {
/* Return Tx to preferred channel */
ncsi_update_tx_channel(ndp, nc->package, NULL, nc);
} else if (has_link) {
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, tmp) {
/* Enable Tx on this channel if the current Tx
* channel is down.
*/
ncm = &tmp->modes[NCSI_MODE_TX_ENABLE];
if (ncm->enable &&
!ncsi_channel_has_link(tmp)) {
ncsi_update_tx_channel(ndp, nc->package,
tmp, nc);
break;
}
}
}
}
/* Leave configured channels active in a multi-channel scenario so
* AEN events are still received.
*/
return 0;
}
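/* Handler for the Configuration Required AEN: mark the channel inactive
 * and queue it for reconfiguration.
 */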
static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
struct ncsi_aen_pkt_hdr *h)
{
struct ncsi_channel *nc;
unsigned long flags;
/* Find the NCSI channel */
ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc);
if (!nc)
return -ENODEV;
spin_lock_irqsave(&nc->lock, flags);
if (!list_empty(&nc->link) ||
nc->state != NCSI_CHANNEL_ACTIVE) {
spin_unlock_irqrestore(&nc->lock, flags);
return 0;
}
spin_unlock_irqrestore(&nc->lock, flags);
ncsi_stop_channel_monitor(nc);
spin_lock_irqsave(&nc->lock, flags);
nc->state = NCSI_CHANNEL_INVISIBLE;
spin_unlock_irqrestore(&nc->lock, flags);
spin_lock_irqsave(&ndp->lock, flags);
nc->state = NCSI_CHANNEL_INACTIVE;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
nc->modes[NCSI_MODE_TX_ENABLE].enable = 0;
return ncsi_process_next_channel(ndp);
}
static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
struct ncsi_aen_pkt_hdr *h)
{
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
struct ncsi_aen_hncdsc_pkt *hncdsc;
unsigned long flags;
/* Find the NCSI channel */
ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc);
if (!nc)
return -ENODEV;
spin_lock_irqsave(&nc->lock, flags);
ncm = &nc->modes[NCSI_MODE_LINK];
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
spin_unlock_irqrestore(&nc->lock, flags);
netdev_dbg(ndp->ndev.dev,
"NCSI: host driver %srunning on channel %u\n",
ncm->data[3] & 0x1 ? "" : "not ", nc->id);
return 0;
}
static struct ncsi_aen_handler {
unsigned char type;
int payload;
int (*handler)(struct ncsi_dev_priv *ndp,
struct ncsi_aen_pkt_hdr *h);
} ncsi_aen_handlers[] = {
{ NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc },
{ NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr },
{ NCSI_PKT_AEN_HNCDSC, 8, ncsi_aen_handler_hncdsc }
};
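/* Entry point for AEN packets: find the type-specific handler, validate
 * the packet and dispatch it.
 */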
int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
{
struct ncsi_aen_pkt_hdr *h;
struct ncsi_aen_handler *nah = NULL;
int i, ret;
/* Find the handler */
h = (struct ncsi_aen_pkt_hdr *)skb_network_header(skb);
for (i = 0; i < ARRAY_SIZE(ncsi_aen_handlers); i++) {
if (ncsi_aen_handlers[i].type == h->type) {
nah = &ncsi_aen_handlers[i];
break;
}
}
if (!nah) {
netdev_warn(ndp->ndev.dev, "Invalid AEN (0x%x) received\n",
h->type);
return -ENOENT;
}
ret = ncsi_validate_aen_pkt(h, nah->payload);
if (ret) {
netdev_warn(ndp->ndev.dev,
"NCSI: 'bad' packet ignored for AEN type 0x%x\n",
h->type);
goto out;
}
ret = nah->handler(ndp, h);
if (ret)
netdev_err(ndp->ndev.dev,
"NCSI: Handler for AEN type 0x%x returned %d\n",
h->type, ret);
out:
consume_skb(skb);
return ret;
}
| linux-master | net/ncsi/ncsi-aen.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright Gavin Shan, IBM Corporation 2016.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"
LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);
bool ncsi_channel_has_link(struct ncsi_channel *channel)
{
return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
}
bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
struct ncsi_channel *channel)
{
struct ncsi_package *np;
struct ncsi_channel *nc;
NCSI_FOR_EACH_PACKAGE(ndp, np)
NCSI_FOR_EACH_CHANNEL(np, nc) {
if (nc == channel)
continue;
if (nc->state == NCSI_CHANNEL_ACTIVE &&
ncsi_channel_has_link(nc))
return false;
}
return true;
}
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_package *np;
struct ncsi_channel *nc;
unsigned long flags;
nd->state = ncsi_dev_state_functional;
if (force_down) {
nd->link_up = 0;
goto report;
}
nd->link_up = 0;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
spin_lock_irqsave(&nc->lock, flags);
if (!list_empty(&nc->link) ||
nc->state != NCSI_CHANNEL_ACTIVE) {
spin_unlock_irqrestore(&nc->lock, flags);
continue;
}
if (ncsi_channel_has_link(nc)) {
spin_unlock_irqrestore(&nc->lock, flags);
nd->link_up = 1;
goto report;
}
spin_unlock_irqrestore(&nc->lock, flags);
}
}
report:
nd->handler(nd);
}
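/* Channel monitor timer: periodically issue Get Link Status commands
 * and, if no response arrives within the allowed window, report the
 * channel as failed and queue it for reconfiguration.
 */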
static void ncsi_channel_monitor(struct timer_list *t)
{
struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
struct ncsi_package *np = nc->package;
struct ncsi_dev_priv *ndp = np->ndp;
struct ncsi_channel_mode *ncm;
struct ncsi_cmd_arg nca;
bool enabled, chained;
unsigned int monitor_state;
unsigned long flags;
int state, ret;
spin_lock_irqsave(&nc->lock, flags);
state = nc->state;
chained = !list_empty(&nc->link);
enabled = nc->monitor.enabled;
monitor_state = nc->monitor.state;
spin_unlock_irqrestore(&nc->lock, flags);
if (!enabled)
return; /* expected race disabling timer */
if (WARN_ON_ONCE(chained))
goto bad_state;
if (state != NCSI_CHANNEL_INACTIVE &&
state != NCSI_CHANNEL_ACTIVE) {
bad_state:
netdev_warn(ndp->ndev.dev,
"Bad NCSI monitor state channel %d 0x%x %s queue\n",
nc->id, state, chained ? "on" : "off");
spin_lock_irqsave(&nc->lock, flags);
nc->monitor.enabled = false;
spin_unlock_irqrestore(&nc->lock, flags);
return;
}
switch (monitor_state) {
case NCSI_CHANNEL_MONITOR_START:
case NCSI_CHANNEL_MONITOR_RETRY:
nca.ndp = ndp;
nca.package = np->id;
nca.channel = nc->id;
nca.type = NCSI_PKT_CMD_GLS;
nca.req_flags = 0;
ret = ncsi_xmit_cmd(&nca);
if (ret)
netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
ret);
break;
case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
break;
default:
netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
nc->id);
ncsi_report_link(ndp, true);
ndp->flags |= NCSI_DEV_RESHUFFLE;
ncm = &nc->modes[NCSI_MODE_LINK];
spin_lock_irqsave(&nc->lock, flags);
nc->monitor.enabled = false;
nc->state = NCSI_CHANNEL_INVISIBLE;
ncm->data[2] &= ~0x1;
spin_unlock_irqrestore(&nc->lock, flags);
spin_lock_irqsave(&ndp->lock, flags);
nc->state = NCSI_CHANNEL_ACTIVE;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
ncsi_process_next_channel(ndp);
return;
}
spin_lock_irqsave(&nc->lock, flags);
nc->monitor.state++;
spin_unlock_irqrestore(&nc->lock, flags);
mod_timer(&nc->monitor.timer, jiffies + HZ);
}
void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
unsigned long flags;
spin_lock_irqsave(&nc->lock, flags);
WARN_ON_ONCE(nc->monitor.enabled);
nc->monitor.enabled = true;
nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
spin_unlock_irqrestore(&nc->lock, flags);
mod_timer(&nc->monitor.timer, jiffies + HZ);
}
void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
unsigned long flags;
spin_lock_irqsave(&nc->lock, flags);
if (!nc->monitor.enabled) {
spin_unlock_irqrestore(&nc->lock, flags);
return;
}
nc->monitor.enabled = false;
spin_unlock_irqrestore(&nc->lock, flags);
del_timer_sync(&nc->monitor.timer);
}
struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
unsigned char id)
{
struct ncsi_channel *nc;
NCSI_FOR_EACH_CHANNEL(np, nc) {
if (nc->id == id)
return nc;
}
return NULL;
}
struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
struct ncsi_channel *nc, *tmp;
int index;
unsigned long flags;
nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
if (!nc)
return NULL;
nc->id = id;
nc->package = np;
nc->state = NCSI_CHANNEL_INACTIVE;
nc->monitor.enabled = false;
timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
spin_lock_init(&nc->lock);
INIT_LIST_HEAD(&nc->link);
for (index = 0; index < NCSI_CAP_MAX; index++)
nc->caps[index].index = index;
for (index = 0; index < NCSI_MODE_MAX; index++)
nc->modes[index].index = index;
spin_lock_irqsave(&np->lock, flags);
tmp = ncsi_find_channel(np, id);
if (tmp) {
spin_unlock_irqrestore(&np->lock, flags);
kfree(nc);
return tmp;
}
list_add_tail_rcu(&nc->node, &np->channels);
np->channel_num++;
spin_unlock_irqrestore(&np->lock, flags);
return nc;
}
static void ncsi_remove_channel(struct ncsi_channel *nc)
{
struct ncsi_package *np = nc->package;
unsigned long flags;
spin_lock_irqsave(&nc->lock, flags);
/* Release filters */
kfree(nc->mac_filter.addrs);
kfree(nc->vlan_filter.vids);
nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
ncsi_stop_channel_monitor(nc);
/* Remove and free channel */
spin_lock_irqsave(&np->lock, flags);
list_del_rcu(&nc->node);
np->channel_num--;
spin_unlock_irqrestore(&np->lock, flags);
kfree(nc);
}
struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
unsigned char id)
{
struct ncsi_package *np;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
if (np->id == id)
return np;
}
return NULL;
}
struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
unsigned char id)
{
struct ncsi_package *np, *tmp;
unsigned long flags;
np = kzalloc(sizeof(*np), GFP_ATOMIC);
if (!np)
return NULL;
np->id = id;
np->ndp = ndp;
spin_lock_init(&np->lock);
INIT_LIST_HEAD(&np->channels);
np->channel_whitelist = UINT_MAX;
spin_lock_irqsave(&ndp->lock, flags);
tmp = ncsi_find_package(ndp, id);
if (tmp) {
spin_unlock_irqrestore(&ndp->lock, flags);
kfree(np);
return tmp;
}
list_add_tail_rcu(&np->node, &ndp->packages);
ndp->package_num++;
spin_unlock_irqrestore(&ndp->lock, flags);
return np;
}
void ncsi_remove_package(struct ncsi_package *np)
{
struct ncsi_dev_priv *ndp = np->ndp;
struct ncsi_channel *nc, *tmp;
unsigned long flags;
/* Release all child channels */
list_for_each_entry_safe(nc, tmp, &np->channels, node)
ncsi_remove_channel(nc);
/* Remove and free package */
spin_lock_irqsave(&ndp->lock, flags);
list_del_rcu(&np->node);
ndp->package_num--;
spin_unlock_irqrestore(&ndp->lock, flags);
kfree(np);
}
void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
unsigned char id,
struct ncsi_package **np,
struct ncsi_channel **nc)
{
struct ncsi_package *p;
struct ncsi_channel *c;
p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
if (np)
*np = p;
if (nc)
*nc = c;
}
/* Two consecutive NCSI commands shouldn't use the same packet ID,
 * otherwise a stale response could be matched to the wrong request.
 * So the available IDs are allocated in a round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
unsigned int req_flags)
{
struct ncsi_request *nr = NULL;
int i, limit = ARRAY_SIZE(ndp->requests);
unsigned long flags;
/* Look for an available request slot from the cursor up to the ceiling */
spin_lock_irqsave(&ndp->lock, flags);
for (i = ndp->request_id; i < limit; i++) {
if (ndp->requests[i].used)
continue;
nr = &ndp->requests[i];
nr->used = true;
nr->flags = req_flags;
ndp->request_id = i + 1;
goto found;
}
/* Fall back and search again from the starting cursor */
for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
if (ndp->requests[i].used)
continue;
nr = &ndp->requests[i];
nr->used = true;
nr->flags = req_flags;
ndp->request_id = i + 1;
goto found;
}
found:
spin_unlock_irqrestore(&ndp->lock, flags);
return nr;
}
void ncsi_free_request(struct ncsi_request *nr)
{
struct ncsi_dev_priv *ndp = nr->ndp;
struct sk_buff *cmd, *rsp;
unsigned long flags;
bool driven;
if (nr->enabled) {
nr->enabled = false;
del_timer_sync(&nr->timer);
}
spin_lock_irqsave(&ndp->lock, flags);
cmd = nr->cmd;
rsp = nr->rsp;
nr->cmd = NULL;
nr->rsp = NULL;
nr->used = false;
driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
spin_unlock_irqrestore(&ndp->lock, flags);
if (driven && cmd && --ndp->pending_req_num == 0)
schedule_work(&ndp->work);
/* Release command and response */
consume_skb(cmd);
consume_skb(rsp);
}
struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
struct ncsi_dev_priv *ndp;
NCSI_FOR_EACH_DEV(ndp) {
if (ndp->ndev.dev == dev)
return &ndp->ndev;
}
return NULL;
}
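/* Per-request timer: if no response arrived in time, notify any netlink
 * originator and release the request.
 */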
static void ncsi_request_timeout(struct timer_list *t)
{
struct ncsi_request *nr = from_timer(nr, t, timer);
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_cmd_pkt *cmd;
struct ncsi_package *np;
struct ncsi_channel *nc;
unsigned long flags;
/* If the request already has an associated response,
 * let the response handler release it.
 */
spin_lock_irqsave(&ndp->lock, flags);
nr->enabled = false;
if (nr->rsp || !nr->cmd) {
spin_unlock_irqrestore(&ndp->lock, flags);
return;
}
spin_unlock_irqrestore(&ndp->lock, flags);
if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
if (nr->cmd) {
/* Find the package */
cmd = (struct ncsi_cmd_pkt *)
skb_network_header(nr->cmd);
ncsi_find_package_and_channel(ndp,
cmd->cmd.common.channel,
&np, &nc);
ncsi_send_netlink_timeout(nr, np, nc);
}
}
/* Release the request */
ncsi_free_request(nr);
}
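/* State machine for suspending the active channel: select the package,
 * optionally refresh link states, disable channel Tx, disable the
 * channel, then deselect the package.
 */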
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_package *np;
struct ncsi_channel *nc, *tmp;
struct ncsi_cmd_arg nca;
unsigned long flags;
int ret;
np = ndp->active_package;
nc = ndp->active_channel;
nca.ndp = ndp;
nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
switch (nd->state) {
case ncsi_dev_state_suspend:
nd->state = ncsi_dev_state_suspend_select;
fallthrough;
case ncsi_dev_state_suspend_select:
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_SP;
nca.package = np->id;
nca.channel = NCSI_RESERVED_CHANNEL;
if (ndp->flags & NCSI_DEV_HWA)
nca.bytes[0] = 0;
else
nca.bytes[0] = 1;
/* Retrieve the latest link states of the channels in the
 * current package when the active channel needs to fail over
 * to another one, since we may select another channel as the
 * next active one and the channels' link states are the most
 * important factor in that selection. Unfortunately, the link
 * states of inactive channels can't be updated by LSC AENs in
 * time, so query them explicitly here.
 */
if (ndp->flags & NCSI_DEV_RESHUFFLE)
nd->state = ncsi_dev_state_suspend_gls;
else
nd->state = ncsi_dev_state_suspend_dcnt;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
break;
case ncsi_dev_state_suspend_gls:
ndp->pending_req_num = np->channel_num;
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
nd->state = ncsi_dev_state_suspend_dcnt;
NCSI_FOR_EACH_CHANNEL(np, nc) {
nca.channel = nc->id;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
}
break;
case ncsi_dev_state_suspend_dcnt:
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_DCNT;
nca.package = np->id;
nca.channel = nc->id;
nd->state = ncsi_dev_state_suspend_dc;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
break;
case ncsi_dev_state_suspend_dc:
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_DC;
nca.package = np->id;
nca.channel = nc->id;
nca.bytes[0] = 1;
nd->state = ncsi_dev_state_suspend_deselect;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
NCSI_FOR_EACH_CHANNEL(np, tmp) {
/* If there is another channel active on this package
* do not deselect the package.
*/
if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
nd->state = ncsi_dev_state_suspend_done;
break;
}
}
break;
case ncsi_dev_state_suspend_deselect:
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_DP;
nca.package = np->id;
nca.channel = NCSI_RESERVED_CHANNEL;
nd->state = ncsi_dev_state_suspend_done;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
break;
case ncsi_dev_state_suspend_done:
spin_lock_irqsave(&nc->lock, flags);
nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
if (ndp->flags & NCSI_DEV_RESET)
ncsi_reset_dev(nd);
else
ncsi_process_next_channel(ndp);
break;
default:
netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
nd->state);
}
return;
error:
nd->state = ncsi_dev_state_functional;
}
/* Check the VLAN filter bitmap for a set filter, and construct a
* "Set VLAN Filter - Disable" packet if found.
*/
static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
struct ncsi_cmd_arg *nca)
{
struct ncsi_channel_vlan_filter *ncf;
unsigned long flags;
void *bitmap;
int index;
u16 vid;
ncf = &nc->vlan_filter;
bitmap = &ncf->bitmap;
spin_lock_irqsave(&nc->lock, flags);
index = find_first_bit(bitmap, ncf->n_vids);
if (index >= ncf->n_vids) {
spin_unlock_irqrestore(&nc->lock, flags);
return -1;
}
vid = ncf->vids[index];
clear_bit(index, bitmap);
ncf->vids[index] = 0;
spin_unlock_irqrestore(&nc->lock, flags);
nca->type = NCSI_PKT_CMD_SVF;
nca->words[1] = vid;
/* HW filter index starts at 1 */
nca->bytes[6] = index + 1;
nca->bytes[7] = 0x00;
return 0;
}
/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
* packet.
*/
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
struct ncsi_cmd_arg *nca)
{
struct ncsi_channel_vlan_filter *ncf;
struct vlan_vid *vlan = NULL;
unsigned long flags;
int i, index;
void *bitmap;
u16 vid;
if (list_empty(&ndp->vlan_vids))
return -1;
ncf = &nc->vlan_filter;
bitmap = &ncf->bitmap;
spin_lock_irqsave(&nc->lock, flags);
rcu_read_lock();
list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
vid = vlan->vid;
for (i = 0; i < ncf->n_vids; i++)
if (ncf->vids[i] == vid) {
vid = 0;
break;
}
if (vid)
break;
}
rcu_read_unlock();
if (!vid) {
/* No new VLAN ID to set */
spin_unlock_irqrestore(&nc->lock, flags);
return -1;
}
index = find_first_zero_bit(bitmap, ncf->n_vids);
if (index < 0 || index >= ncf->n_vids) {
netdev_err(ndp->ndev.dev,
"Channel %u already has all VLAN filters set\n",
nc->id);
spin_unlock_irqrestore(&nc->lock, flags);
return -1;
}
ncf->vids[index] = vid;
set_bit(index, bitmap);
spin_unlock_irqrestore(&nc->lock, flags);
nca->type = NCSI_PKT_CMD_SVF;
nca->words[1] = vid;
/* HW filter index starts at 1 */
nca->bytes[6] = index + 1;
nca->bytes[7] = 0x01;
return 0;
}
#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
{
unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
int ret = 0;
nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
/* PHY Link up attribute */
data[6] = 0x1;
nca->data = data;
ret = ncsi_xmit_cmd(nca);
if (ret)
netdev_err(nca->ndp->ndev.dev,
"NCSI: Failed to transmit cmd 0x%x during configure\n",
nca->type);
return ret;
}
#endif
#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
/* NCSI OEM Command APIs */
static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
{
unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
int ret = 0;
nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
data[5] = NCSI_OEM_BCM_CMD_GMA;
nca->data = data;
ret = ncsi_xmit_cmd(nca);
if (ret)
netdev_err(nca->ndp->ndev.dev,
"NCSI: Failed to transmit cmd 0x%x during configure\n",
nca->type);
return ret;
}
static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
{
union {
u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
} u;
int ret = 0;
nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
memset(&u, 0, sizeof(u));
u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
nca->data = u.data_u8;
ret = ncsi_xmit_cmd(nca);
if (ret)
netdev_err(nca->ndp->ndev.dev,
"NCSI: Failed to transmit cmd 0x%x during configure\n",
nca->type);
return ret;
}
static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
{
union {
u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
} u;
int ret = 0;
memset(&u, 0, sizeof(u));
u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
nca->data = u.data_u8;
ret = ncsi_xmit_cmd(nca);
if (ret)
netdev_err(nca->ndp->ndev.dev,
"NCSI: Failed to transmit cmd 0x%x during probe\n",
nca->type);
return ret;
}
static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
{
unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
int ret = 0;
nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;
memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
data[4] = NCSI_OEM_INTEL_CMD_GMA;
nca->data = data;
ret = ncsi_xmit_cmd(nca);
if (ret)
netdev_err(nca->ndp->ndev.dev,
"NCSI: Failed to transmit cmd 0x%x during configure\n",
nca->type);
return ret;
}
/* OEM Command handlers initialization */
static struct ncsi_oem_gma_handler {
unsigned int mfr_id;
int (*handler)(struct ncsi_cmd_arg *nca);
} ncsi_oem_gma_handlers[] = {
{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
};
static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
{
struct ncsi_oem_gma_handler *nch = NULL;
int i;
/* This function should only be called once, return if flag set */
if (nca->ndp->gma_flag == 1)
return -1;
/* Find gma handler for given manufacturer id */
for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
if (ncsi_oem_gma_handlers[i].handler)
nch = &ncsi_oem_gma_handlers[i];
break;
}
}
if (!nch) {
netdev_err(nca->ndp->ndev.dev,
"NCSI: No GMA handler available for MFR-ID (0x%x)\n",
mf_id);
return -1;
}
/* Get Mac address from NCSI device */
return nch->handler(nca);
}
#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
/* Determine if a given channel from the channel_queue should be used for Tx */
static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
struct ncsi_channel *nc)
{
struct ncsi_channel_mode *ncm;
struct ncsi_channel *channel;
struct ncsi_package *np;
/* Check if any other channel has Tx enabled; a channel may have already
* been configured and removed from the channel queue.
*/
NCSI_FOR_EACH_PACKAGE(ndp, np) {
if (!ndp->multi_package && np != nc->package)
continue;
NCSI_FOR_EACH_CHANNEL(np, channel) {
ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
if (ncm->enable)
return false;
}
}
/* If a queued package has a preferred channel with link,
* use this channel for Tx only if it is that preferred channel
*/
list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
np = channel->package;
if (np->preferred_channel &&
ncsi_channel_has_link(np->preferred_channel)) {
return np->preferred_channel == nc;
}
}
/* This channel has link */
if (ncsi_channel_has_link(nc))
return true;
list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
if (ncsi_channel_has_link(channel))
return false;
/* No other channel has link; default to this one */
return true;
}
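/* Summary of the Tx-selection order implemented above:
* 1) if any other in-scope channel already has Tx enabled, don't use this one;
* 2) if a queued package has a preferred channel with link, use this channel
*    only if it is that preferred channel;
* 3) otherwise use this channel if it has link;
* 4) otherwise use it only when no other queued channel has link.
*/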
/* Change the active Tx channel in a multi-channel setup */
int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
struct ncsi_package *package,
struct ncsi_channel *disable,
struct ncsi_channel *enable)
{
struct ncsi_cmd_arg nca;
struct ncsi_channel *nc;
struct ncsi_package *np;
int ret = 0;
if (!package->multi_channel && !ndp->multi_package)
netdev_warn(ndp->ndev.dev,
"NCSI: Trying to update Tx channel in single-channel mode\n");
nca.ndp = ndp;
nca.req_flags = 0;
/* Find current channel with Tx enabled */
NCSI_FOR_EACH_PACKAGE(ndp, np) {
if (disable)
break;
if (!ndp->multi_package && np != package)
continue;
NCSI_FOR_EACH_CHANNEL(np, nc)
if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
disable = nc;
break;
}
}
/* Find a suitable channel for Tx */
NCSI_FOR_EACH_PACKAGE(ndp, np) {
if (enable)
break;
if (!ndp->multi_package && np != package)
continue;
if (!(ndp->package_whitelist & (0x1 << np->id)))
continue;
if (np->preferred_channel &&
ncsi_channel_has_link(np->preferred_channel)) {
enable = np->preferred_channel;
break;
}
NCSI_FOR_EACH_CHANNEL(np, nc) {
if (!(np->channel_whitelist & 0x1 << nc->id))
continue;
if (nc->state != NCSI_CHANNEL_ACTIVE)
continue;
if (ncsi_channel_has_link(nc)) {
enable = nc;
break;
}
}
}
if (disable == enable)
return -1;
if (!enable)
return -1;
if (disable) {
nca.channel = disable->id;
nca.package = disable->package->id;
nca.type = NCSI_PKT_CMD_DCNT;
ret = ncsi_xmit_cmd(&nca);
if (ret)
netdev_err(ndp->ndev.dev,
"Error %d sending DCNT\n",
ret);
}
netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
nca.channel = enable->id;
nca.package = enable->package->id;
nca.type = NCSI_PKT_CMD_ECNT;
ret = ncsi_xmit_cmd(&nca);
if (ret)
netdev_err(ndp->ndev.dev,
"Error %d sending ECNT\n",
ret);
return ret;
}
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_package *np = ndp->active_package;
struct ncsi_channel *nc = ndp->active_channel;
struct ncsi_channel *hot_nc = NULL;
struct ncsi_dev *nd = &ndp->ndev;
struct net_device *dev = nd->dev;
struct ncsi_cmd_arg nca;
unsigned char index;
unsigned long flags;
int ret;
nca.ndp = ndp;
nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
switch (nd->state) {
case ncsi_dev_state_config:
case ncsi_dev_state_config_sp:
ndp->pending_req_num = 1;
/* Select the specific package */
nca.type = NCSI_PKT_CMD_SP;
if (ndp->flags & NCSI_DEV_HWA)
nca.bytes[0] = 0;
else
nca.bytes[0] = 1;
nca.package = np->id;
nca.channel = NCSI_RESERVED_CHANNEL;
ret = ncsi_xmit_cmd(&nca);
if (ret) {
netdev_err(ndp->ndev.dev,
"NCSI: Failed to transmit CMD_SP\n");
goto error;
}
nd->state = ncsi_dev_state_config_cis;
break;
case ncsi_dev_state_config_cis:
ndp->pending_req_num = 1;
/* Clear initial state */
nca.type = NCSI_PKT_CMD_CIS;
nca.package = np->id;
nca.channel = nc->id;
ret = ncsi_xmit_cmd(&nca);
if (ret) {
netdev_err(ndp->ndev.dev,
"NCSI: Failed to transmit CMD_CIS\n");
goto error;
}
nd->state = ncsi_dev_state_config_oem_gma;
break;
case ncsi_dev_state_config_oem_gma:
nd->state = ncsi_dev_state_config_clear_vids;
ret = -1;
#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
nca.type = NCSI_PKT_CMD_OEM;
nca.package = np->id;
nca.channel = nc->id;
ndp->pending_req_num = 1;
ret = ncsi_gma_handler(&nca, nc->version.mf_id);
#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
if (ret < 0)
schedule_work(&ndp->work);
break;
case ncsi_dev_state_config_clear_vids:
case ncsi_dev_state_config_svf:
case ncsi_dev_state_config_ev:
case ncsi_dev_state_config_sma:
case ncsi_dev_state_config_ebf:
case ncsi_dev_state_config_dgmf:
case ncsi_dev_state_config_ecnt:
case ncsi_dev_state_config_ec:
case ncsi_dev_state_config_ae:
case ncsi_dev_state_config_gls:
ndp->pending_req_num = 1;
nca.package = np->id;
nca.channel = nc->id;
/* Clear any active filters on the channel before setting */
if (nd->state == ncsi_dev_state_config_clear_vids) {
ret = clear_one_vid(ndp, nc, &nca);
if (ret) {
nd->state = ncsi_dev_state_config_svf;
schedule_work(&ndp->work);
break;
}
/* Repeat */
nd->state = ncsi_dev_state_config_clear_vids;
/* Add known VLAN tags to the filter */
} else if (nd->state == ncsi_dev_state_config_svf) {
ret = set_one_vid(ndp, nc, &nca);
if (ret) {
nd->state = ncsi_dev_state_config_ev;
schedule_work(&ndp->work);
break;
}
/* Repeat */
nd->state = ncsi_dev_state_config_svf;
/* Enable/Disable the VLAN filter */
} else if (nd->state == ncsi_dev_state_config_ev) {
if (list_empty(&ndp->vlan_vids)) {
nca.type = NCSI_PKT_CMD_DV;
} else {
nca.type = NCSI_PKT_CMD_EV;
nca.bytes[3] = NCSI_CAP_VLAN_NO;
}
nd->state = ncsi_dev_state_config_sma;
} else if (nd->state == ncsi_dev_state_config_sma) {
/* Use first entry in unicast filter table. Note that
* the MAC filter table starts from entry 1 instead of
* 0.
*/
nca.type = NCSI_PKT_CMD_SMA;
for (index = 0; index < 6; index++)
nca.bytes[index] = dev->dev_addr[index];
nca.bytes[6] = 0x1;
nca.bytes[7] = 0x1;
nd->state = ncsi_dev_state_config_ebf;
} else if (nd->state == ncsi_dev_state_config_ebf) {
nca.type = NCSI_PKT_CMD_EBF;
nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
/* if global multicast filtering is supported then
* disable it so that all multicast packets will be
* forwarded to the management controller
*/
if (nc->caps[NCSI_CAP_GENERIC].cap &
NCSI_CAP_GENERIC_MC)
nd->state = ncsi_dev_state_config_dgmf;
else if (ncsi_channel_is_tx(ndp, nc))
nd->state = ncsi_dev_state_config_ecnt;
else
nd->state = ncsi_dev_state_config_ec;
} else if (nd->state == ncsi_dev_state_config_dgmf) {
nca.type = NCSI_PKT_CMD_DGMF;
if (ncsi_channel_is_tx(ndp, nc))
nd->state = ncsi_dev_state_config_ecnt;
else
nd->state = ncsi_dev_state_config_ec;
} else if (nd->state == ncsi_dev_state_config_ecnt) {
if (np->preferred_channel &&
nc != np->preferred_channel)
netdev_info(ndp->ndev.dev,
"NCSI: Tx failed over to channel %u\n",
nc->id);
nca.type = NCSI_PKT_CMD_ECNT;
nd->state = ncsi_dev_state_config_ec;
} else if (nd->state == ncsi_dev_state_config_ec) {
/* Enable AEN if it's supported */
nca.type = NCSI_PKT_CMD_EC;
nd->state = ncsi_dev_state_config_ae;
if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
nd->state = ncsi_dev_state_config_gls;
} else if (nd->state == ncsi_dev_state_config_ae) {
nca.type = NCSI_PKT_CMD_AE;
nca.bytes[0] = 0;
nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
nd->state = ncsi_dev_state_config_gls;
} else if (nd->state == ncsi_dev_state_config_gls) {
nca.type = NCSI_PKT_CMD_GLS;
nd->state = ncsi_dev_state_config_done;
}
ret = ncsi_xmit_cmd(&nca);
if (ret) {
netdev_err(ndp->ndev.dev,
"NCSI: Failed to transmit CMD %x\n",
nca.type);
goto error;
}
break;
case ncsi_dev_state_config_done:
netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
nc->id);
spin_lock_irqsave(&nc->lock, flags);
nc->state = NCSI_CHANNEL_ACTIVE;
if (ndp->flags & NCSI_DEV_RESET) {
/* A reset event happened during config, start the reset now */
nc->reconfigure_needed = false;
spin_unlock_irqrestore(&nc->lock, flags);
ncsi_reset_dev(nd);
break;
}
if (nc->reconfigure_needed) {
/* This channel's configuration has been updated
* part-way during the config state - start the
* channel configuration over
*/
nc->reconfigure_needed = false;
nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
netdev_dbg(dev, "Dirty NCSI channel state reset\n");
ncsi_process_next_channel(ndp);
break;
}
if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
hot_nc = nc;
} else {
hot_nc = NULL;
netdev_dbg(ndp->ndev.dev,
"NCSI: channel %u link down after config\n",
nc->id);
}
spin_unlock_irqrestore(&nc->lock, flags);
/* Update the hot channel */
spin_lock_irqsave(&ndp->lock, flags);
ndp->hot_channel = hot_nc;
spin_unlock_irqrestore(&ndp->lock, flags);
ncsi_start_channel_monitor(nc);
ncsi_process_next_channel(ndp);
break;
default:
netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
nd->state);
}
return;
error:
ncsi_report_link(ndp, true);
}
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_channel *nc, *found, *hot_nc;
struct ncsi_channel_mode *ncm;
unsigned long flags, cflags;
struct ncsi_package *np;
bool with_link;
spin_lock_irqsave(&ndp->lock, flags);
hot_nc = ndp->hot_channel;
spin_unlock_irqrestore(&ndp->lock, flags);
/* By default the search is done once an inactive channel with up
* link is found, unless a preferred channel is set.
* If multi_package or multi_channel are configured all channels in the
* whitelist are added to the channel queue.
*/
found = NULL;
with_link = false;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
if (!(ndp->package_whitelist & (0x1 << np->id)))
continue;
NCSI_FOR_EACH_CHANNEL(np, nc) {
if (!(np->channel_whitelist & (0x1 << nc->id)))
continue;
spin_lock_irqsave(&nc->lock, cflags);
if (!list_empty(&nc->link) ||
nc->state != NCSI_CHANNEL_INACTIVE) {
spin_unlock_irqrestore(&nc->lock, cflags);
continue;
}
if (!found)
found = nc;
if (nc == hot_nc)
found = nc;
ncm = &nc->modes[NCSI_MODE_LINK];
if (ncm->data[2] & 0x1) {
found = nc;
with_link = true;
}
/* If multi_channel is enabled configure all valid
* channels whether or not they currently have link
* so they will have AENs enabled.
*/
if (with_link || np->multi_channel) {
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link,
&ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
netdev_dbg(ndp->ndev.dev,
"NCSI: Channel %u added to queue (link %s)\n",
nc->id,
ncm->data[2] & 0x1 ? "up" : "down");
}
spin_unlock_irqrestore(&nc->lock, cflags);
if (with_link && !np->multi_channel)
break;
}
if (with_link && !ndp->multi_package)
break;
}
if (list_empty(&ndp->channel_queue) && found) {
netdev_info(ndp->ndev.dev,
"NCSI: No channel with link found, configuring channel %u\n",
found->id);
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&found->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
} else if (!found) {
netdev_warn(ndp->ndev.dev,
"NCSI: No channel found to configure!\n");
ncsi_report_link(ndp, true);
return -ENODEV;
}
return ncsi_process_next_channel(ndp);
}
static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
struct ncsi_package *np;
struct ncsi_channel *nc;
unsigned int cap;
bool has_channel = false;
/* Hardware arbitration is disabled if any one channel
* doesn't explicitly support it.
*/
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
has_channel = true;
cap = nc->caps[NCSI_CAP_GENERIC].cap;
if (!(cap & NCSI_CAP_GENERIC_HWA) ||
(cap & NCSI_CAP_GENERIC_HWA_MASK) !=
NCSI_CAP_GENERIC_HWA_SUPPORT) {
ndp->flags &= ~NCSI_DEV_HWA;
return false;
}
}
}
if (has_channel) {
ndp->flags |= NCSI_DEV_HWA;
return true;
}
ndp->flags &= ~NCSI_DEV_HWA;
return false;
}
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_package *np;
struct ncsi_channel *nc;
struct ncsi_cmd_arg nca;
unsigned char index;
int ret;
nca.ndp = ndp;
nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
switch (nd->state) {
case ncsi_dev_state_probe:
nd->state = ncsi_dev_state_probe_deselect;
fallthrough;
case ncsi_dev_state_probe_deselect:
ndp->pending_req_num = 8;
/* Deselect all possible packages */
nca.type = NCSI_PKT_CMD_DP;
nca.channel = NCSI_RESERVED_CHANNEL;
for (index = 0; index < 8; index++) {
nca.package = index;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
}
nd->state = ncsi_dev_state_probe_package;
break;
case ncsi_dev_state_probe_package:
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_SP;
nca.bytes[0] = 1;
nca.package = ndp->package_probe_id;
nca.channel = NCSI_RESERVED_CHANNEL;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
nd->state = ncsi_dev_state_probe_channel;
break;
case ncsi_dev_state_probe_channel:
ndp->active_package = ncsi_find_package(ndp,
ndp->package_probe_id);
if (!ndp->active_package) {
/* No response */
nd->state = ncsi_dev_state_probe_dp;
schedule_work(&ndp->work);
break;
}
nd->state = ncsi_dev_state_probe_cis;
if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
ndp->mlx_multi_host)
nd->state = ncsi_dev_state_probe_mlx_gma;
schedule_work(&ndp->work);
break;
#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
case ncsi_dev_state_probe_mlx_gma:
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_OEM;
nca.package = ndp->active_package->id;
nca.channel = 0;
ret = ncsi_oem_gma_handler_mlx(&nca);
if (ret)
goto error;
nd->state = ncsi_dev_state_probe_mlx_smaf;
break;
case ncsi_dev_state_probe_mlx_smaf:
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_OEM;
nca.package = ndp->active_package->id;
nca.channel = 0;
ret = ncsi_oem_smaf_mlx(&nca);
if (ret)
goto error;
nd->state = ncsi_dev_state_probe_cis;
break;
#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
case ncsi_dev_state_probe_cis:
ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
/* Clear initial state */
nca.type = NCSI_PKT_CMD_CIS;
nca.package = ndp->active_package->id;
for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
nca.channel = index;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
}
nd->state = ncsi_dev_state_probe_gvi;
if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
nd->state = ncsi_dev_state_probe_keep_phy;
break;
#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
case ncsi_dev_state_probe_keep_phy:
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_OEM;
nca.package = ndp->active_package->id;
nca.channel = 0;
ret = ncsi_oem_keep_phy_intel(&nca);
if (ret)
goto error;
nd->state = ncsi_dev_state_probe_gvi;
break;
#endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
case ncsi_dev_state_probe_gvi:
case ncsi_dev_state_probe_gc:
case ncsi_dev_state_probe_gls:
np = ndp->active_package;
ndp->pending_req_num = np->channel_num;
/* Retrieve version, capability or link status */
if (nd->state == ncsi_dev_state_probe_gvi)
nca.type = NCSI_PKT_CMD_GVI;
else if (nd->state == ncsi_dev_state_probe_gc)
nca.type = NCSI_PKT_CMD_GC;
else
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
NCSI_FOR_EACH_CHANNEL(np, nc) {
nca.channel = nc->id;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
}
if (nd->state == ncsi_dev_state_probe_gvi)
nd->state = ncsi_dev_state_probe_gc;
else if (nd->state == ncsi_dev_state_probe_gc)
nd->state = ncsi_dev_state_probe_gls;
else
nd->state = ncsi_dev_state_probe_dp;
break;
case ncsi_dev_state_probe_dp:
ndp->pending_req_num = 1;
/* Deselect the current package */
nca.type = NCSI_PKT_CMD_DP;
nca.package = ndp->package_probe_id;
nca.channel = NCSI_RESERVED_CHANNEL;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
/* Probe next package */
ndp->package_probe_id++;
if (ndp->package_probe_id >= 8) {
/* Probe finished */
ndp->flags |= NCSI_DEV_PROBED;
break;
}
nd->state = ncsi_dev_state_probe_package;
ndp->active_package = NULL;
break;
default:
netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
nd->state);
}
if (ndp->flags & NCSI_DEV_PROBED) {
/* Check if all packages have HWA support */
ncsi_check_hwa(ndp);
ncsi_choose_active_channel(ndp);
}
return;
error:
netdev_err(ndp->ndev.dev,
"NCSI: Failed to transmit cmd 0x%x during probe\n",
nca.type);
ncsi_report_link(ndp, true);
}
static void ncsi_dev_work(struct work_struct *work)
{
struct ncsi_dev_priv *ndp = container_of(work,
struct ncsi_dev_priv, work);
struct ncsi_dev *nd = &ndp->ndev;
switch (nd->state & ncsi_dev_state_major) {
case ncsi_dev_state_probe:
ncsi_probe_channel(ndp);
break;
case ncsi_dev_state_suspend:
ncsi_suspend_channel(ndp);
break;
case ncsi_dev_state_config:
ncsi_configure_channel(ndp);
break;
default:
netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
nd->state);
}
}
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_channel *nc;
int old_state;
unsigned long flags;
spin_lock_irqsave(&ndp->lock, flags);
nc = list_first_or_null_rcu(&ndp->channel_queue,
struct ncsi_channel, link);
if (!nc) {
spin_unlock_irqrestore(&ndp->lock, flags);
goto out;
}
list_del_init(&nc->link);
spin_unlock_irqrestore(&ndp->lock, flags);
spin_lock_irqsave(&nc->lock, flags);
old_state = nc->state;
nc->state = NCSI_CHANNEL_INVISIBLE;
spin_unlock_irqrestore(&nc->lock, flags);
ndp->active_channel = nc;
ndp->active_package = nc->package;
switch (old_state) {
case NCSI_CHANNEL_INACTIVE:
ndp->ndev.state = ncsi_dev_state_config;
netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
nc->id);
ncsi_configure_channel(ndp);
break;
case NCSI_CHANNEL_ACTIVE:
ndp->ndev.state = ncsi_dev_state_suspend;
netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
nc->id);
ncsi_suspend_channel(ndp);
break;
default:
netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
old_state, nc->package->id, nc->id);
ncsi_report_link(ndp, false);
return -EINVAL;
}
return 0;
out:
ndp->active_channel = NULL;
ndp->active_package = NULL;
if (ndp->flags & NCSI_DEV_RESHUFFLE) {
ndp->flags &= ~NCSI_DEV_RESHUFFLE;
return ncsi_choose_active_channel(ndp);
}
ncsi_report_link(ndp, false);
return -ENODEV;
}
static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_channel *nc;
struct ncsi_package *np;
unsigned long flags;
unsigned int n = 0;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
spin_lock_irqsave(&nc->lock, flags);
/* Channels may be busy - mark dirty instead of
* kicking if:
* a) not ACTIVE (configured)
* b) in the channel_queue (to be configured)
* c) its ndev is in the config state
*/
if (nc->state != NCSI_CHANNEL_ACTIVE) {
if ((ndp->ndev.state & 0xff00) ==
ncsi_dev_state_config ||
!list_empty(&nc->link)) {
netdev_dbg(nd->dev,
"NCSI: channel %p marked dirty\n",
nc);
nc->reconfigure_needed = true;
}
spin_unlock_irqrestore(&nc->lock, flags);
continue;
}
spin_unlock_irqrestore(&nc->lock, flags);
ncsi_stop_channel_monitor(nc);
spin_lock_irqsave(&nc->lock, flags);
nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
n++;
}
}
return n;
}
int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct ncsi_dev_priv *ndp;
unsigned int n_vids = 0;
struct vlan_vid *vlan;
struct ncsi_dev *nd;
bool found = false;
if (vid == 0)
return 0;
nd = ncsi_find_dev(dev);
if (!nd) {
netdev_warn(dev, "NCSI: No net_device?\n");
return 0;
}
ndp = TO_NCSI_DEV_PRIV(nd);
/* Add the VLAN id to our internal list */
list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
n_vids++;
if (vlan->vid == vid) {
netdev_dbg(dev, "NCSI: vid %u already registered\n",
vid);
return 0;
}
}
if (n_vids >= NCSI_MAX_VLAN_VIDS) {
netdev_warn(dev,
"tried to add vlan id %u but NCSI max already registered (%u)\n",
vid, NCSI_MAX_VLAN_VIDS);
return -ENOSPC;
}
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return -ENOMEM;
vlan->proto = proto;
vlan->vid = vid;
list_add_rcu(&vlan->list, &ndp->vlan_vids);
netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
found = ncsi_kick_channels(ndp) != 0;
return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct vlan_vid *vlan, *tmp;
struct ncsi_dev_priv *ndp;
struct ncsi_dev *nd;
bool found = false;
if (vid == 0)
return 0;
nd = ncsi_find_dev(dev);
if (!nd) {
netdev_warn(dev, "NCSI: no net_device?\n");
return 0;
}
ndp = TO_NCSI_DEV_PRIV(nd);
/* Remove the VLAN id from our internal list */
list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
if (vlan->vid == vid) {
netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
list_del_rcu(&vlan->list);
found = true;
kfree(vlan);
}
if (!found) {
netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
return -EINVAL;
}
found = ncsi_kick_channels(ndp) != 0;
return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
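/* These two helpers are intended to back a driver's VLAN ndo callbacks;
* a minimal driver-side sketch with a hypothetical netdev_ops instance:
*
*	static const struct net_device_ops foo_netdev_ops = {
*		...
*		.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
*		.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
*	};
*/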
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
void (*handler)(struct ncsi_dev *ndev))
{
struct ncsi_dev_priv *ndp;
struct ncsi_dev *nd;
struct platform_device *pdev;
struct device_node *np;
unsigned long flags;
int i;
/* Check if the device has been registered or not */
nd = ncsi_find_dev(dev);
if (nd)
return nd;
/* Create NCSI device */
ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
if (!ndp)
return NULL;
nd = &ndp->ndev;
nd->state = ncsi_dev_state_registered;
nd->dev = dev;
nd->handler = handler;
ndp->pending_req_num = 0;
INIT_LIST_HEAD(&ndp->channel_queue);
INIT_LIST_HEAD(&ndp->vlan_vids);
INIT_WORK(&ndp->work, ncsi_dev_work);
ndp->package_whitelist = UINT_MAX;
/* Initialize private NCSI device */
spin_lock_init(&ndp->lock);
INIT_LIST_HEAD(&ndp->packages);
ndp->request_id = NCSI_REQ_START_IDX;
for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
ndp->requests[i].id = i;
ndp->requests[i].ndp = ndp;
timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
}
spin_lock_irqsave(&ncsi_dev_lock, flags);
list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
spin_unlock_irqrestore(&ncsi_dev_lock, flags);
/* Register NCSI packet Rx handler */
ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
ndp->ptype.func = ncsi_rcv_rsp;
ndp->ptype.dev = dev;
dev_add_pack(&ndp->ptype);
pdev = to_platform_device(dev->dev.parent);
if (pdev) {
np = pdev->dev.of_node;
if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
of_property_read_bool(np, "mlx,multi-host")))
ndp->mlx_multi_host = true;
}
return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);
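/* Typical driver usage, sketched with hypothetical names (foo_*, priv);
* the handler is invoked on state/link updates via ncsi_report_link():
*
*	static void foo_ncsi_handler(struct ncsi_dev *nd) { ... }
*
*	priv->ndev = ncsi_register_dev(netdev, foo_ncsi_handler);
*	ncsi_start_dev(priv->ndev);       // e.g. from ndo_open
*	ncsi_stop_dev(priv->ndev);        // e.g. from ndo_stop
*	ncsi_unregister_dev(priv->ndev);  // on driver removal
*/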
int ncsi_start_dev(struct ncsi_dev *nd)
{
struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
if (nd->state != ncsi_dev_state_registered &&
nd->state != ncsi_dev_state_functional)
return -ENOTTY;
if (!(ndp->flags & NCSI_DEV_PROBED)) {
ndp->package_probe_id = 0;
nd->state = ncsi_dev_state_probe;
schedule_work(&ndp->work);
return 0;
}
return ncsi_reset_dev(nd);
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);
void ncsi_stop_dev(struct ncsi_dev *nd)
{
struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
struct ncsi_package *np;
struct ncsi_channel *nc;
bool chained;
int old_state;
unsigned long flags;
/* Stop the channel monitor on any active channels. Don't reset the
* channel state so we know which were active when ncsi_start_dev()
* is next called.
*/
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
ncsi_stop_channel_monitor(nc);
spin_lock_irqsave(&nc->lock, flags);
chained = !list_empty(&nc->link);
old_state = nc->state;
spin_unlock_irqrestore(&nc->lock, flags);
WARN_ON_ONCE(chained ||
old_state == NCSI_CHANNEL_INVISIBLE);
}
}
netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);
int ncsi_reset_dev(struct ncsi_dev *nd)
{
struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
struct ncsi_channel *nc, *active, *tmp;
struct ncsi_package *np;
unsigned long flags;
spin_lock_irqsave(&ndp->lock, flags);
if (!(ndp->flags & NCSI_DEV_RESET)) {
/* No reset in progress yet, check states */
switch (nd->state & ncsi_dev_state_major) {
case ncsi_dev_state_registered:
case ncsi_dev_state_probe:
/* Not even probed yet - do nothing */
spin_unlock_irqrestore(&ndp->lock, flags);
return 0;
case ncsi_dev_state_suspend:
case ncsi_dev_state_config:
/* Wait for the channel to finish its suspend/config
* operation; once it finishes it will check for
* NCSI_DEV_RESET and reset the state.
*/
ndp->flags |= NCSI_DEV_RESET;
spin_unlock_irqrestore(&ndp->lock, flags);
return 0;
}
} else {
switch (nd->state) {
case ncsi_dev_state_suspend_done:
case ncsi_dev_state_config_done:
case ncsi_dev_state_functional:
/* Ok */
break;
default:
/* A reset operation is already in progress */
spin_unlock_irqrestore(&ndp->lock, flags);
return 0;
}
}
if (!list_empty(&ndp->channel_queue)) {
/* Clear any channel queue we may have interrupted */
list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
list_del_init(&nc->link);
}
spin_unlock_irqrestore(&ndp->lock, flags);
active = NULL;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
spin_lock_irqsave(&nc->lock, flags);
if (nc->state == NCSI_CHANNEL_ACTIVE) {
active = nc;
nc->state = NCSI_CHANNEL_INVISIBLE;
spin_unlock_irqrestore(&nc->lock, flags);
ncsi_stop_channel_monitor(nc);
break;
}
spin_unlock_irqrestore(&nc->lock, flags);
}
if (active)
break;
}
if (!active) {
/* Done */
spin_lock_irqsave(&ndp->lock, flags);
ndp->flags &= ~NCSI_DEV_RESET;
spin_unlock_irqrestore(&ndp->lock, flags);
return ncsi_choose_active_channel(ndp);
}
spin_lock_irqsave(&ndp->lock, flags);
ndp->flags |= NCSI_DEV_RESET;
ndp->active_channel = active;
ndp->active_package = active->package;
spin_unlock_irqrestore(&ndp->lock, flags);
nd->state = ncsi_dev_state_suspend;
schedule_work(&ndp->work);
return 0;
}
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
struct ncsi_package *np, *tmp;
unsigned long flags;
dev_remove_pack(&ndp->ptype);
list_for_each_entry_safe(np, tmp, &ndp->packages, node)
ncsi_remove_package(np);
spin_lock_irqsave(&ncsi_dev_lock, flags);
list_del_rcu(&ndp->node);
spin_unlock_irqrestore(&ncsi_dev_lock, flags);
kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
| linux-master | net/ncsi/ncsi-manage.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Network Service Header
*
* Copyright (c) 2017 Red Hat, Inc. -- Jiri Benc <[email protected]>
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gso.h>
#include <net/nsh.h>
#include <net/tun_proto.h>
int nsh_push(struct sk_buff *skb, const struct nshhdr *pushed_nh)
{
struct nshhdr *nh;
size_t length = nsh_hdr_len(pushed_nh);
u8 next_proto;
if (skb->mac_len) {
next_proto = TUN_P_ETHERNET;
} else {
next_proto = tun_p_from_eth_p(skb->protocol);
if (!next_proto)
return -EAFNOSUPPORT;
}
/* Add the NSH header */
if (skb_cow_head(skb, length) < 0)
return -ENOMEM;
skb_push(skb, length);
nh = (struct nshhdr *)(skb->data);
memcpy(nh, pushed_nh, length);
nh->np = next_proto;
skb_postpush_rcsum(skb, nh, length);
skb->protocol = htons(ETH_P_NSH);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_mac_len(skb);
return 0;
}
EXPORT_SYMBOL_GPL(nsh_push);
int nsh_pop(struct sk_buff *skb)
{
struct nshhdr *nh;
size_t length;
__be16 inner_proto;
if (!pskb_may_pull(skb, NSH_BASE_HDR_LEN))
return -ENOMEM;
nh = (struct nshhdr *)(skb->data);
length = nsh_hdr_len(nh);
if (length < NSH_BASE_HDR_LEN)
return -EINVAL;
inner_proto = tun_p_to_eth_p(nh->np);
if (!pskb_may_pull(skb, length))
return -ENOMEM;
if (!inner_proto)
return -EAFNOSUPPORT;
skb_pull_rcsum(skb, length);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_mac_len(skb);
skb->protocol = inner_proto;
return 0;
}
EXPORT_SYMBOL_GPL(nsh_pop);
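/* Usage note: these helpers are exported for NSH-aware datapaths; e.g. the
* openvswitch module uses nsh_push()/nsh_pop() when implementing its
* push_nsh and pop_nsh actions.
*/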
static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
unsigned int nsh_len, mac_len;
__be16 proto;
skb_reset_network_header(skb);
mac_len = skb->mac_len;
if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
goto out;
nsh_len = nsh_hdr_len(nsh_hdr(skb));
if (nsh_len < NSH_BASE_HDR_LEN)
goto out;
if (unlikely(!pskb_may_pull(skb, nsh_len)))
goto out;
proto = tun_p_to_eth_p(nsh_hdr(skb)->np);
if (!proto)
goto out;
__skb_pull(skb, nsh_len);
skb_reset_mac_header(skb);
skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
skb->protocol = proto;
features &= NETIF_F_SG;
segs = skb_mac_gso_segment(skb, features);
if (IS_ERR_OR_NULL(segs)) {
skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len,
mac_offset, mac_len);
goto out;
}
for (skb = segs; skb; skb = skb->next) {
skb->protocol = htons(ETH_P_NSH);
__skb_push(skb, nsh_len);
skb->mac_header = mac_offset;
skb->network_header = skb->mac_header + mac_len;
skb->mac_len = mac_len;
}
out:
return segs;
}
static struct packet_offload nsh_packet_offload __read_mostly = {
.type = htons(ETH_P_NSH),
.priority = 15,
.callbacks = {
.gso_segment = nsh_gso_segment,
},
};
static int __init nsh_init_module(void)
{
dev_add_offload(&nsh_packet_offload);
return 0;
}
static void __exit nsh_cleanup_module(void)
{
dev_remove_offload(&nsh_packet_offload);
}
module_init(nsh_init_module);
module_exit(nsh_cleanup_module);
MODULE_AUTHOR("Jiri Benc <[email protected]>");
MODULE_DESCRIPTION("NSH protocol");
MODULE_LICENSE("GPL v2");
| linux-master | net/nsh/nsh.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/netlink_diag.h>
#include <linux/rhashtable.h>
#include "af_netlink.h"
static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (nlk->groups == NULL)
return 0;
return nla_put(nlskb, NETLINK_DIAG_GROUPS, NLGRPSZ(nlk->ngroups),
nlk->groups);
}
static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
u32 flags = 0;
if (nlk->cb_running)
flags |= NDIAG_FLAG_CB_RUNNING;
if (nlk_test_bit(RECV_PKTINFO, sk))
flags |= NDIAG_FLAG_PKTINFO;
if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
flags |= NDIAG_FLAG_BROADCAST_ERROR;
if (nlk_test_bit(RECV_NO_ENOBUFS, sk))
flags |= NDIAG_FLAG_NO_ENOBUFS;
if (nlk_test_bit(LISTEN_ALL_NSID, sk))
flags |= NDIAG_FLAG_LISTEN_ALL_NSID;
if (nlk_test_bit(CAP_ACK, sk))
flags |= NDIAG_FLAG_CAP_ACK;
return nla_put_u32(skb, NETLINK_DIAG_FLAGS, flags);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
struct netlink_diag_req *req,
u32 portid, u32 seq, u32 flags, int sk_ino)
{
struct nlmsghdr *nlh;
struct netlink_diag_msg *rep;
struct netlink_sock *nlk = nlk_sk(sk);
nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
flags);
if (!nlh)
return -EMSGSIZE;
rep = nlmsg_data(nlh);
rep->ndiag_family = AF_NETLINK;
rep->ndiag_type = sk->sk_type;
rep->ndiag_protocol = sk->sk_protocol;
rep->ndiag_state = sk->sk_state;
rep->ndiag_ino = sk_ino;
rep->ndiag_portid = nlk->portid;
rep->ndiag_dst_portid = nlk->dst_portid;
rep->ndiag_dst_group = nlk->dst_group;
sock_diag_save_cookie(sk, rep->ndiag_cookie);
if ((req->ndiag_show & NDIAG_SHOW_GROUPS) &&
sk_diag_dump_groups(sk, skb))
goto out_nlmsg_trim;
if ((req->ndiag_show & NDIAG_SHOW_MEMINFO) &&
sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
goto out_nlmsg_trim;
if ((req->ndiag_show & NDIAG_SHOW_FLAGS) &&
sk_diag_put_flags(sk, skb))
goto out_nlmsg_trim;
nlmsg_end(skb, nlh);
return 0;
out_nlmsg_trim:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
int protocol, int s_num)
{
struct rhashtable_iter *hti = (void *)cb->args[2];
struct netlink_table *tbl = &nl_table[protocol];
struct net *net = sock_net(skb->sk);
struct netlink_diag_req *req;
struct netlink_sock *nlsk;
unsigned long flags;
struct sock *sk;
int num = 2;
int ret = 0;
req = nlmsg_data(cb->nlh);
if (s_num > 1)
goto mc_list;
num--;
if (!hti) {
hti = kmalloc(sizeof(*hti), GFP_KERNEL);
if (!hti)
return -ENOMEM;
cb->args[2] = (long)hti;
}
if (!s_num)
rhashtable_walk_enter(&tbl->hash, hti);
rhashtable_walk_start(hti);
while ((nlsk = rhashtable_walk_next(hti))) {
if (IS_ERR(nlsk)) {
ret = PTR_ERR(nlsk);
if (ret == -EAGAIN) {
ret = 0;
continue;
}
break;
}
sk = (struct sock *)nlsk;
if (!net_eq(sock_net(sk), net))
continue;
if (sk_diag_fill(sk, skb, req,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
sock_i_ino(sk)) < 0) {
ret = 1;
break;
}
}
rhashtable_walk_stop(hti);
if (ret)
goto done;
rhashtable_walk_exit(hti);
num++;
mc_list:
read_lock_irqsave(&nl_table_lock, flags);
sk_for_each_bound(sk, &tbl->mc_list) {
if (sk_hashed(sk))
continue;
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num) {
num++;
continue;
}
if (sk_diag_fill(sk, skb, req,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
__sock_i_ino(sk)) < 0) {
ret = 1;
break;
}
num++;
}
read_unlock_irqrestore(&nl_table_lock, flags);
done:
cb->args[0] = num;
return ret;
}
static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct netlink_diag_req *req;
int s_num = cb->args[0];
int err = 0;
req = nlmsg_data(cb->nlh);
if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
int i;
for (i = cb->args[1]; i < MAX_LINKS; i++) {
err = __netlink_diag_dump(skb, cb, i, s_num);
if (err)
break;
s_num = 0;
}
cb->args[1] = i;
} else {
if (req->sdiag_protocol >= MAX_LINKS)
return -ENOENT;
err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
}
return err < 0 ? err : skb->len;
}
static int netlink_diag_dump_done(struct netlink_callback *cb)
{
struct rhashtable_iter *hti = (void *)cb->args[2];
if (cb->args[0] == 1)
rhashtable_walk_exit(hti);
kfree(hti);
return 0;
}
static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct netlink_diag_req);
struct net *net = sock_net(skb->sk);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
if (h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = netlink_diag_dump,
.done = netlink_diag_dump_done,
};
return netlink_dump_start(net->diag_nlsk, skb, h, &c);
} else
return -EOPNOTSUPP;
}
static const struct sock_diag_handler netlink_diag_handler = {
.family = AF_NETLINK,
.dump = netlink_diag_handler_dump,
};
static int __init netlink_diag_init(void)
{
return sock_diag_register(&netlink_diag_handler);
}
static void __exit netlink_diag_exit(void)
{
sock_diag_unregister(&netlink_diag_handler);
}
module_init(netlink_diag_init);
module_exit(netlink_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 16 /* AF_NETLINK */);
| linux-master | net/netlink/diag.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NETLINK Generic Netlink Family
*
* Authors: Jamal Hadi Salim
* Thomas Graf <[email protected]>
* Johannes Berg <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string_helpers.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include <net/sock.h>
#include <net/genetlink.h>
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);
atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
void genl_lock(void)
{
mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);
void genl_unlock(void)
{
mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);
static void genl_lock_all(void)
{
down_write(&cb_lock);
genl_lock();
}
static void genl_unlock_all(void)
{
genl_unlock();
up_write(&cb_lock);
}
static void genl_op_lock(const struct genl_family *family)
{
if (!family->parallel_ops)
genl_lock();
}
static void genl_op_unlock(const struct genl_family *family)
{
if (!family->parallel_ops)
genl_unlock();
}
static DEFINE_IDR(genl_fam_idr);
/*
* Bitmap of multicast groups that are currently in use.
*
* To avoid an allocation at boot of just one unsigned long,
* declare it global instead.
* Bit 0 is marked as already used since group 0 is invalid.
* Bit 1 is marked as already used since the drop-monitor code
* abuses the API and thinks it can statically use group 1.
* That group will typically conflict with other groups that
* any proper users use.
* Bit 16 is marked as used since it's used for generic netlink
* and the code no longer marks pre-reserved IDs as used.
* Bit 17 is marked as already used since the VFS quota code
* also abused this API and relied on family == group ID, we
* cater to that by giving it a static family and group ID.
* Bit 18 is marked as already used since the PMCRAID driver
* did the same thing as the VFS quota code (maybe copied?)
*/
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
BIT(GENL_ID_VFS_DQUOT) |
BIT(GENL_ID_PMCRAID);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;
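/* With GENL_ID_CTRL == 16, GENL_ID_VFS_DQUOT == 17 and GENL_ID_PMCRAID == 18
* (NLMSG_MIN_TYPE == 0x10), the initial bitmap above therefore has bits
* 0, 1, 16, 17 and 18 marked as used.
*/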
/* We need the last attribute with non-zero ID therefore a 2-entry array */
static struct nla_policy genl_policy_reject_all[] = {
{ .type = NLA_REJECT },
{ .type = NLA_REJECT },
};
static int genl_ctrl_event(int event, const struct genl_family *family,
const struct genl_multicast_group *grp,
int grp_id);
static void
genl_op_fill_in_reject_policy(const struct genl_family *family,
struct genl_ops *op)
{
BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);
if (op->policy || op->cmd < family->resv_start_op)
return;
op->policy = genl_policy_reject_all;
op->maxattr = 1;
}
static void
genl_op_fill_in_reject_policy_split(const struct genl_family *family,
struct genl_split_ops *op)
{
if (op->policy)
return;
op->policy = genl_policy_reject_all;
op->maxattr = 1;
}
static const struct genl_family *genl_family_find_byid(unsigned int id)
{
return idr_find(&genl_fam_idr, id);
}
static const struct genl_family *genl_family_find_byname(char *name)
{
const struct genl_family *family;
unsigned int id;
idr_for_each_entry(&genl_fam_idr, family, id)
if (strcmp(family->name, name) == 0)
return family;
return NULL;
}
struct genl_op_iter {
const struct genl_family *family;
struct genl_split_ops doit;
struct genl_split_ops dumpit;
int cmd_idx;
int entry_idx;
u32 cmd;
u8 flags;
};
static void genl_op_from_full(const struct genl_family *family,
unsigned int i, struct genl_ops *op)
{
*op = family->ops[i];
if (!op->maxattr)
op->maxattr = family->maxattr;
if (!op->policy)
op->policy = family->policy;
genl_op_fill_in_reject_policy(family, op);
}
static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
struct genl_ops *op)
{
int i;
for (i = 0; i < family->n_ops; i++)
if (family->ops[i].cmd == cmd) {
genl_op_from_full(family, i, op);
return 0;
}
return -ENOENT;
}
static void genl_op_from_small(const struct genl_family *family,
unsigned int i, struct genl_ops *op)
{
memset(op, 0, sizeof(*op));
op->doit = family->small_ops[i].doit;
op->dumpit = family->small_ops[i].dumpit;
op->cmd = family->small_ops[i].cmd;
op->internal_flags = family->small_ops[i].internal_flags;
op->flags = family->small_ops[i].flags;
op->validate = family->small_ops[i].validate;
op->maxattr = family->maxattr;
op->policy = family->policy;
genl_op_fill_in_reject_policy(family, op);
}
static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
struct genl_ops *op)
{
int i;
for (i = 0; i < family->n_small_ops; i++)
if (family->small_ops[i].cmd == cmd) {
genl_op_from_small(family, i, op);
return 0;
}
return -ENOENT;
}
static void genl_op_from_split(struct genl_op_iter *iter)
{
const struct genl_family *family = iter->family;
int i, cnt = 0;
i = iter->entry_idx - family->n_ops - family->n_small_ops;
if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
iter->doit = family->split_ops[i + cnt];
genl_op_fill_in_reject_policy_split(family, &iter->doit);
cnt++;
} else {
memset(&iter->doit, 0, sizeof(iter->doit));
}
if (i + cnt < family->n_split_ops &&
family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP) {
iter->dumpit = family->split_ops[i + cnt];
genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
cnt++;
} else {
memset(&iter->dumpit, 0, sizeof(iter->dumpit));
}
WARN_ON(!cnt);
iter->entry_idx += cnt;
}
static int
genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
struct genl_split_ops *op)
{
int i;
for (i = 0; i < family->n_split_ops; i++)
if (family->split_ops[i].cmd == cmd &&
family->split_ops[i].flags & flag) {
*op = family->split_ops[i];
return 0;
}
return -ENOENT;
}
static int
genl_cmd_full_to_split(struct genl_split_ops *op,
const struct genl_family *family,
const struct genl_ops *full, u8 flags)
{
if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
(flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
memset(op, 0, sizeof(*op));
return -ENOENT;
}
if (flags & GENL_CMD_CAP_DUMP) {
op->start = full->start;
op->dumpit = full->dumpit;
op->done = full->done;
} else {
op->pre_doit = family->pre_doit;
op->doit = full->doit;
op->post_doit = family->post_doit;
}
if (flags & GENL_CMD_CAP_DUMP &&
full->validate & GENL_DONT_VALIDATE_DUMP) {
op->policy = NULL;
op->maxattr = 0;
} else {
op->policy = full->policy;
op->maxattr = full->maxattr;
}
op->cmd = full->cmd;
op->internal_flags = full->internal_flags;
op->flags = full->flags;
op->validate = full->validate;
/* Make sure flags include the GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
op->flags |= flags;
return 0;
}
/* Must make sure that op is initialized to 0 on failure */
static int
genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
struct genl_split_ops *op)
{
struct genl_ops full;
int err;
err = genl_get_cmd_full(cmd, family, &full);
if (err == -ENOENT)
err = genl_get_cmd_small(cmd, family, &full);
/* Found one of legacy forms */
if (err == 0)
return genl_cmd_full_to_split(op, family, &full, flags);
err = genl_get_cmd_split(cmd, flags, family, op);
if (err)
memset(op, 0, sizeof(*op));
return err;
}
/* For policy dumping only, get ops of both do and dump.
* Fail if both are missing, genl_get_cmd() will zero-init in case of failure.
*/
static int
genl_get_cmd_both(u32 cmd, const struct genl_family *family,
struct genl_split_ops *doit, struct genl_split_ops *dumpit)
{
int err1, err2;
err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);
return err1 && err2 ? -ENOENT : 0;
}
static bool
genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
{
iter->family = family;
iter->cmd_idx = 0;
iter->entry_idx = 0;
iter->flags = 0;
return iter->family->n_ops +
iter->family->n_small_ops +
iter->family->n_split_ops;
}
static bool genl_op_iter_next(struct genl_op_iter *iter)
{
const struct genl_family *family = iter->family;
bool legacy_op = true;
struct genl_ops op;
if (iter->entry_idx < family->n_ops) {
genl_op_from_full(family, iter->entry_idx, &op);
} else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
genl_op_from_small(family, iter->entry_idx - family->n_ops,
&op);
} else if (iter->entry_idx <
family->n_ops + family->n_small_ops + family->n_split_ops) {
legacy_op = false;
/* updates entry_idx */
genl_op_from_split(iter);
} else {
return false;
}
iter->cmd_idx++;
if (legacy_op) {
iter->entry_idx++;
genl_cmd_full_to_split(&iter->doit, family,
&op, GENL_CMD_CAP_DO);
genl_cmd_full_to_split(&iter->dumpit, family,
&op, GENL_CMD_CAP_DUMP);
}
iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
iter->flags = iter->doit.flags | iter->dumpit.flags;
return true;
}
static void
genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
{
*dst = *src;
}
static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
{
return iter->cmd_idx;
}
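/* Reserve @n_groups consecutive multicast group IDs: scan mc_groups for a
* run of free bits, growing the bitmap (moving off the static single word
* on first growth) whenever the run would pass the current end, then mark
* the chosen range as used and return its first ID via @first_id.
*/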
static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
unsigned long *new_groups;
int start = 0;
int i;
int id;
bool fits;
do {
if (start == 0)
id = find_first_zero_bit(mc_groups,
mc_groups_longs *
BITS_PER_LONG);
else
id = find_next_zero_bit(mc_groups,
mc_groups_longs * BITS_PER_LONG,
start);
fits = true;
for (i = id;
i < min_t(int, id + n_groups,
mc_groups_longs * BITS_PER_LONG);
i++) {
if (test_bit(i, mc_groups)) {
start = i;
fits = false;
break;
}
}
if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
unsigned long new_longs = mc_groups_longs +
BITS_TO_LONGS(n_groups);
size_t nlen = new_longs * sizeof(unsigned long);
if (mc_groups == &mc_group_start) {
new_groups = kzalloc(nlen, GFP_KERNEL);
if (!new_groups)
return -ENOMEM;
mc_groups = new_groups;
*mc_groups = mc_group_start;
} else {
new_groups = krealloc(mc_groups, nlen,
GFP_KERNEL);
if (!new_groups)
return -ENOMEM;
mc_groups = new_groups;
for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
mc_groups[mc_groups_longs + i] = 0;
}
mc_groups_longs = new_longs;
}
} while (!fits);
for (i = id; i < id + n_groups; i++)
set_bit(i, mc_groups);
*first_id = id;
return 0;
}
static struct genl_family genl_ctrl;
static int genl_validate_assign_mc_groups(struct genl_family *family)
{
int first_id;
int n_groups = family->n_mcgrps;
int err = 0, i;
bool groups_allocated = false;
if (!n_groups)
return 0;
for (i = 0; i < n_groups; i++) {
const struct genl_multicast_group *grp = &family->mcgrps[i];
if (WARN_ON(grp->name[0] == '\0'))
return -EINVAL;
if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
return -EINVAL;
}
/* special-case our own group and hacks */
if (family == &genl_ctrl) {
first_id = GENL_ID_CTRL;
BUG_ON(n_groups != 1);
} else if (strcmp(family->name, "NET_DM") == 0) {
first_id = 1;
BUG_ON(n_groups != 1);
} else if (family->id == GENL_ID_VFS_DQUOT) {
first_id = GENL_ID_VFS_DQUOT;
BUG_ON(n_groups != 1);
} else if (family->id == GENL_ID_PMCRAID) {
first_id = GENL_ID_PMCRAID;
BUG_ON(n_groups != 1);
} else {
groups_allocated = true;
err = genl_allocate_reserve_groups(n_groups, &first_id);
if (err)
return err;
}
family->mcgrp_offset = first_id;
/* if still initializing, can't and don't need to realloc bitmaps */
if (!init_net.genl_sock)
return 0;
if (family->netnsok) {
struct net *net;
netlink_table_grab();
rcu_read_lock();
for_each_net_rcu(net) {
err = __netlink_change_ngroups(net->genl_sock,
mc_groups_longs * BITS_PER_LONG);
if (err) {
/*
* No need to roll back, can only fail if
* memory allocation fails and then the
* number of _possible_ groups has been
* increased on some sockets which is ok.
*/
break;
}
}
rcu_read_unlock();
netlink_table_ungrab();
} else {
err = netlink_change_ngroups(init_net.genl_sock,
mc_groups_longs * BITS_PER_LONG);
}
if (groups_allocated && err) {
for (i = 0; i < family->n_mcgrps; i++)
clear_bit(family->mcgrp_offset + i, mc_groups);
}
return err;
}
static void genl_unregister_mc_groups(const struct genl_family *family)
{
struct net *net;
int i;
netlink_table_grab();
rcu_read_lock();
for_each_net_rcu(net) {
for (i = 0; i < family->n_mcgrps; i++)
__netlink_clear_multicast_users(
net->genl_sock, family->mcgrp_offset + i);
}
rcu_read_unlock();
netlink_table_ungrab();
for (i = 0; i < family->n_mcgrps; i++) {
int grp_id = family->mcgrp_offset + i;
if (grp_id != 1)
clear_bit(grp_id, mc_groups);
genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
&family->mcgrps[i], grp_id);
}
}
static bool genl_split_op_check(const struct genl_split_ops *op)
{
if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
GENL_CMD_CAP_DUMP)) != 1))
return true;
return false;
}
static int genl_validate_ops(const struct genl_family *family)
{
struct genl_op_iter i, j;
unsigned int s;
if (WARN_ON(family->n_ops && !family->ops) ||
WARN_ON(family->n_small_ops && !family->small_ops) ||
WARN_ON(family->n_split_ops && !family->split_ops))
return -EINVAL;
for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
return -EINVAL;
if (WARN_ON(i.cmd >= family->resv_start_op &&
(i.doit.validate || i.dumpit.validate)))
return -EINVAL;
genl_op_iter_copy(&j, &i);
while (genl_op_iter_next(&j)) {
if (i.cmd == j.cmd)
return -EINVAL;
}
}
if (family->n_split_ops) {
if (genl_split_op_check(&family->split_ops[0]))
return -EINVAL;
}
for (s = 1; s < family->n_split_ops; s++) {
const struct genl_split_ops *a, *b;
a = &family->split_ops[s - 1];
b = &family->split_ops[s];
if (genl_split_op_check(b))
return -EINVAL;
/* Check sort order */
if (a->cmd < b->cmd) {
continue;
} else if (a->cmd > b->cmd) {
WARN_ON(1);
return -EINVAL;
}
if (a->internal_flags != b->internal_flags ||
((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
GENL_CMD_CAP_DUMP))) {
WARN_ON(1);
return -EINVAL;
}
if ((a->flags & GENL_CMD_CAP_DO) &&
(b->flags & GENL_CMD_CAP_DUMP))
continue;
WARN_ON(1);
return -EINVAL;
}
return 0;
}
/**
* genl_register_family - register a generic netlink family
* @family: generic netlink family
*
* Registers the specified family after validating it first. Only one
* family may be registered with the same family name or identifier.
*
* The family's ops, multicast groups and module pointer must already
* be assigned.
*
* Return 0 on success or a negative error code.
*/
int genl_register_family(struct genl_family *family)
{
int err, i;
int start = GENL_START_ALLOC, end = GENL_MAX_ID;
err = genl_validate_ops(family);
if (err)
return err;
genl_lock_all();
if (genl_family_find_byname(family->name)) {
err = -EEXIST;
goto errout_locked;
}
/*
* Sadly, a few cases need to be special-cased
* due to them having previously abused the API
* and having used their family ID also as their
* multicast group ID, so we use reserved IDs
* for both to be sure we can do that mapping.
*/
if (family == &genl_ctrl) {
/* and this needs to be special for initial family lookups */
start = end = GENL_ID_CTRL;
} else if (strcmp(family->name, "pmcraid") == 0) {
start = end = GENL_ID_PMCRAID;
} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
start = end = GENL_ID_VFS_DQUOT;
}
family->id = idr_alloc_cyclic(&genl_fam_idr, family,
start, end + 1, GFP_KERNEL);
if (family->id < 0) {
err = family->id;
goto errout_locked;
}
err = genl_validate_assign_mc_groups(family);
if (err)
goto errout_remove;
genl_unlock_all();
/* send all events */
genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
for (i = 0; i < family->n_mcgrps; i++)
genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
&family->mcgrps[i], family->mcgrp_offset + i);
return 0;
errout_remove:
idr_remove(&genl_fam_idr, family->id);
errout_locked:
genl_unlock_all();
return err;
}
EXPORT_SYMBOL(genl_register_family);
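/* Minimal registration sketch with hypothetical names (foo_*); real families
* also wire up policies, multicast groups and module init/exit:
*
*	static const struct genl_ops foo_ops[] = {
*		{ .cmd = FOO_CMD_GET, .doit = foo_get_doit, },
*	};
*
*	static struct genl_family foo_family = {
*		.name		= "foo",
*		.version	= 1,
*		.maxattr	= FOO_ATTR_MAX,
*		.policy		= foo_policy,
*		.module		= THIS_MODULE,
*		.ops		= foo_ops,
*		.n_ops		= ARRAY_SIZE(foo_ops),
*		.resv_start_op	= FOO_CMD_GET + 1,
*	};
*
*	err = genl_register_family(&foo_family);
*/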
/**
* genl_unregister_family - unregister generic netlink family
* @family: generic netlink family
*
* Unregisters the specified family.
*
* Returns 0 on success or a negative error code.
*/
int genl_unregister_family(const struct genl_family *family)
{
genl_lock_all();
if (!genl_family_find_byid(family->id)) {
genl_unlock_all();
return -ENOENT;
}
genl_unregister_mc_groups(family);
idr_remove(&genl_fam_idr, family->id);
up_write(&cb_lock);
wait_event(genl_sk_destructing_waitq,
atomic_read(&genl_sk_destructing_cnt) == 0);
genl_unlock();
genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
return 0;
}
EXPORT_SYMBOL(genl_unregister_family);
/**
* genlmsg_put - Add generic netlink header to netlink message
* @skb: socket buffer holding the message
* @portid: netlink portid the message is addressed to
* @seq: sequence number (usually the one of the sender)
* @family: generic netlink family
* @flags: netlink message flags
* @cmd: generic netlink command
*
* Returns pointer to user specific header
*/
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
const struct genl_family *family, int flags, u8 cmd)
{
struct nlmsghdr *nlh;
struct genlmsghdr *hdr;
nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
family->hdrsize, flags);
if (nlh == NULL)
return NULL;
hdr = nlmsg_data(nlh);
hdr->cmd = cmd;
hdr->version = family->version;
hdr->reserved = 0;
return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);
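/* Typical reply construction in a doit() handler, sketched with hypothetical
* command/attribute names and error handling omitted:
*
*	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
*	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
*			  &foo_family, 0, FOO_CMD_GET);
*	nla_put_u32(msg, FOO_ATTR_VALUE, val);
*	genlmsg_end(msg, hdr);
*	return genlmsg_reply(msg, info);
*/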
static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
{
return kmalloc(sizeof(struct genl_dumpit_info), GFP_KERNEL);
}
static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
{
kfree(info);
}
static struct nlattr **
genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
const struct genl_split_ops *ops,
int hdrlen,
enum genl_validate_flags no_strict_flag)
{
enum netlink_validation validate = ops->validate & no_strict_flag ?
NL_VALIDATE_LIBERAL :
NL_VALIDATE_STRICT;
struct nlattr **attrbuf;
int err;
if (!ops->maxattr)
return NULL;
attrbuf = kmalloc_array(ops->maxattr + 1,
sizeof(struct nlattr *), GFP_KERNEL);
if (!attrbuf)
return ERR_PTR(-ENOMEM);
err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
validate, extack);
if (err) {
kfree(attrbuf);
return ERR_PTR(err);
}
return attrbuf;
}
static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
{
kfree(attrbuf);
}
struct genl_start_context {
const struct genl_family *family;
struct nlmsghdr *nlh;
struct netlink_ext_ack *extack;
const struct genl_split_ops *ops;
int hdrlen;
};
static int genl_start(struct netlink_callback *cb)
{
struct genl_start_context *ctx = cb->data;
const struct genl_split_ops *ops;
struct genl_dumpit_info *info;
struct nlattr **attrs = NULL;
int rc = 0;
ops = ctx->ops;
if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
return -EINVAL;
attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
ops, ctx->hdrlen,
GENL_DONT_VALIDATE_DUMP_STRICT);
if (IS_ERR(attrs))
return PTR_ERR(attrs);
info = genl_dumpit_info_alloc();
if (!info) {
genl_family_rcv_msg_attrs_free(attrs);
return -ENOMEM;
}
info->op = *ops;
info->info.family = ctx->family;
info->info.snd_seq = cb->nlh->nlmsg_seq;
info->info.snd_portid = NETLINK_CB(cb->skb).portid;
info->info.nlhdr = cb->nlh;
info->info.genlhdr = nlmsg_data(cb->nlh);
info->info.attrs = attrs;
genl_info_net_set(&info->info, sock_net(cb->skb->sk));
info->info.extack = cb->extack;
memset(&info->info.user_ptr, 0, sizeof(info->info.user_ptr));
cb->data = info;
if (ops->start) {
genl_op_lock(ctx->family);
rc = ops->start(cb);
genl_op_unlock(ctx->family);
}
if (rc) {
genl_family_rcv_msg_attrs_free(info->info.attrs);
genl_dumpit_info_free(info);
cb->data = NULL;
}
return rc;
}
static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct genl_dumpit_info *dump_info = cb->data;
const struct genl_split_ops *ops = &dump_info->op;
struct genl_info *info = &dump_info->info;
int rc;
info->extack = cb->extack;
genl_op_lock(info->family);
rc = ops->dumpit(skb, cb);
genl_op_unlock(info->family);
return rc;
}
static int genl_done(struct netlink_callback *cb)
{
struct genl_dumpit_info *dump_info = cb->data;
const struct genl_split_ops *ops = &dump_info->op;
struct genl_info *info = &dump_info->info;
int rc = 0;
info->extack = cb->extack;
if (ops->done) {
genl_op_lock(info->family);
rc = ops->done(cb);
genl_op_unlock(info->family);
}
genl_family_rcv_msg_attrs_free(info->attrs);
genl_dumpit_info_free(dump_info);
return rc;
}
static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
const struct genl_split_ops *ops,
int hdrlen, struct net *net)
{
struct genl_start_context ctx;
struct netlink_dump_control c = {
.module = family->module,
.data = &ctx,
.start = genl_start,
.dump = genl_dumpit,
.done = genl_done,
.extack = extack,
};
int err;
ctx.family = family;
ctx.nlh = nlh;
ctx.extack = extack;
ctx.ops = ops;
ctx.hdrlen = hdrlen;
genl_op_unlock(family);
err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
genl_op_lock(family);
return err;
}
static int genl_family_rcv_msg_doit(const struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
const struct genl_split_ops *ops,
int hdrlen, struct net *net)
{
struct nlattr **attrbuf;
struct genl_info info;
int err;
attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
ops, hdrlen,
GENL_DONT_VALIDATE_STRICT);
if (IS_ERR(attrbuf))
return PTR_ERR(attrbuf);
info.snd_seq = nlh->nlmsg_seq;
info.snd_portid = NETLINK_CB(skb).portid;
info.family = family;
info.nlhdr = nlh;
info.genlhdr = nlmsg_data(nlh);
info.attrs = attrbuf;
info.extack = extack;
genl_info_net_set(&info, net);
memset(&info.user_ptr, 0, sizeof(info.user_ptr));
if (ops->pre_doit) {
err = ops->pre_doit(ops, skb, &info);
if (err)
goto out;
}
err = ops->doit(skb, &info);
if (ops->post_doit)
ops->post_doit(ops, skb, &info);
out:
genl_family_rcv_msg_attrs_free(attrbuf);
return err;
}
static int genl_header_check(const struct genl_family *family,
struct nlmsghdr *nlh, struct genlmsghdr *hdr,
struct netlink_ext_ack *extack)
{
u16 flags;
/* Only for commands added after we started validating */
if (hdr->cmd < family->resv_start_op)
return 0;
if (hdr->reserved) {
NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
return -EINVAL;
}
/* Old netlink flags have pretty loose semantics; allow only the flags
* consumed by the core, where we can enforce the meaning.
*/
flags = nlh->nlmsg_flags;
if ((flags & NLM_F_DUMP) == NLM_F_DUMP) /* DUMP is 2 bits */
flags &= ~NLM_F_DUMP;
if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
NL_SET_ERR_MSG(extack,
"ambiguous or reserved bits set in nlmsg_flags");
return -EINVAL;
}
return 0;
}
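/*
* Editorial sketch (not part of the original source): for a command at or
* above resv_start_op, genl_header_check() requires genlmsghdr.reserved to
* be 0 and rejects any nlmsg_flags bits outside NLM_F_REQUEST, NLM_F_ACK,
* NLM_F_ECHO and the full two-bit NLM_F_DUMP combination. A hypothetical
* request built as
*
*	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
*	hdr->reserved = 0;
*
* passes, whereas setting e.g. NLM_F_REPLACE or a non-zero reserved field
* makes the function record an extack message and return -EINVAL.
*/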
static int genl_family_rcv_msg(const struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct genlmsghdr *hdr = nlmsg_data(nlh);
struct genl_split_ops op;
int hdrlen;
u8 flags;
/* this family doesn't exist in this netns */
if (!family->netnsok && !net_eq(net, &init_net))
return -ENOENT;
hdrlen = GENL_HDRLEN + family->hdrsize;
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
if (genl_header_check(family, nlh, hdr, extack))
return -EINVAL;
flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
if (genl_get_cmd(hdr->cmd, flags, family, &op))
return -EOPNOTSUPP;
if ((op.flags & GENL_ADMIN_PERM) &&
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if ((op.flags & GENL_UNS_ADMIN_PERM) &&
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (flags & GENL_CMD_CAP_DUMP)
return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
&op, hdrlen, net);
else
return genl_family_rcv_msg_doit(family, skb, nlh, extack,
&op, hdrlen, net);
}
static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
const struct genl_family *family;
int err;
family = genl_family_find_byid(nlh->nlmsg_type);
if (family == NULL)
return -ENOENT;
genl_op_lock(family);
err = genl_family_rcv_msg(family, skb, nlh, extack);
genl_op_unlock(family);
return err;
}
static void genl_rcv(struct sk_buff *skb)
{
down_read(&cb_lock);
netlink_rcv_skb(skb, &genl_rcv_msg);
up_read(&cb_lock);
}
/**************************************************************************
* Controller
**************************************************************************/
static struct genl_family genl_ctrl;
static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
u32 flags, struct sk_buff *skb, u8 cmd)
{
struct genl_op_iter i;
void *hdr;
hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
if (hdr == NULL)
return -1;
if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
goto nla_put_failure;
if (genl_op_iter_init(family, &i)) {
struct nlattr *nla_ops;
nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
if (nla_ops == NULL)
goto nla_put_failure;
while (genl_op_iter_next(&i)) {
struct nlattr *nest;
u32 op_flags;
op_flags = i.flags;
if (i.doit.policy || i.dumpit.policy)
op_flags |= GENL_CMD_CAP_HASPOL;
nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
if (nest == NULL)
goto nla_put_failure;
if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
goto nla_put_failure;
nla_nest_end(skb, nest);
}
nla_nest_end(skb, nla_ops);
}
if (family->n_mcgrps) {
struct nlattr *nla_grps;
int i;
nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
if (nla_grps == NULL)
goto nla_put_failure;
for (i = 0; i < family->n_mcgrps; i++) {
struct nlattr *nest;
const struct genl_multicast_group *grp;
grp = &family->mcgrps[i];
nest = nla_nest_start_noflag(skb, i + 1);
if (nest == NULL)
goto nla_put_failure;
if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
family->mcgrp_offset + i) ||
nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
grp->name))
goto nla_put_failure;
nla_nest_end(skb, nest);
}
nla_nest_end(skb, nla_grps);
}
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ctrl_fill_mcgrp_info(const struct genl_family *family,
const struct genl_multicast_group *grp,
int grp_id, u32 portid, u32 seq, u32 flags,
struct sk_buff *skb, u8 cmd)
{
void *hdr;
struct nlattr *nla_grps;
struct nlattr *nest;
hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
if (hdr == NULL)
return -1;
if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
goto nla_put_failure;
nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
if (nla_grps == NULL)
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, 1);
if (nest == NULL)
goto nla_put_failure;
if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
grp->name))
goto nla_put_failure;
nla_nest_end(skb, nest);
nla_nest_end(skb, nla_grps);
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
int n = 0;
struct genl_family *rt;
struct net *net = sock_net(skb->sk);
int fams_to_skip = cb->args[0];
unsigned int id;
idr_for_each_entry(&genl_fam_idr, rt, id) {
if (!rt->netnsok && !net_eq(net, &init_net))
continue;
if (n++ < fams_to_skip)
continue;
if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
skb, CTRL_CMD_NEWFAMILY) < 0) {
n--;
break;
}
}
cb->args[0] = n;
return skb->len;
}
static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
u32 portid, int seq, u8 cmd)
{
struct sk_buff *skb;
int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb == NULL)
return ERR_PTR(-ENOBUFS);
err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
if (err < 0) {
nlmsg_free(skb);
return ERR_PTR(err);
}
return skb;
}
static struct sk_buff *
ctrl_build_mcgrp_msg(const struct genl_family *family,
const struct genl_multicast_group *grp,
int grp_id, u32 portid, int seq, u8 cmd)
{
struct sk_buff *skb;
int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb == NULL)
return ERR_PTR(-ENOBUFS);
err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
seq, 0, skb, cmd);
if (err < 0) {
nlmsg_free(skb);
return ERR_PTR(err);
}
return skb;
}
static const struct nla_policy ctrl_policy_family[] = {
[CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
[CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
.len = GENL_NAMSIZ - 1 },
};
static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
const struct genl_family *res = NULL;
int err = -EINVAL;
if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
res = genl_family_find_byid(id);
err = -ENOENT;
}
if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
char *name;
name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
if (res == NULL) {
genl_unlock();
up_read(&cb_lock);
request_module("net-pf-%d-proto-%d-family-%s",
PF_NETLINK, NETLINK_GENERIC, name);
down_read(&cb_lock);
genl_lock();
res = genl_family_find_byname(name);
}
#endif
err = -ENOENT;
}
if (res == NULL)
return err;
if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
/* family doesn't exist here */
return -ENOENT;
}
msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
CTRL_CMD_NEWFAMILY);
if (IS_ERR(msg))
return PTR_ERR(msg);
return genlmsg_reply(msg, info);
}
static int genl_ctrl_event(int event, const struct genl_family *family,
const struct genl_multicast_group *grp,
int grp_id)
{
struct sk_buff *msg;
/* genl is still initialising */
if (!init_net.genl_sock)
return 0;
switch (event) {
case CTRL_CMD_NEWFAMILY:
case CTRL_CMD_DELFAMILY:
WARN_ON(grp);
msg = ctrl_build_family_msg(family, 0, 0, event);
break;
case CTRL_CMD_NEWMCAST_GRP:
case CTRL_CMD_DELMCAST_GRP:
BUG_ON(!grp);
msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
break;
default:
return -EINVAL;
}
if (IS_ERR(msg))
return PTR_ERR(msg);
if (!family->netnsok) {
genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
0, GFP_KERNEL);
} else {
rcu_read_lock();
genlmsg_multicast_allns(&genl_ctrl, msg, 0,
0, GFP_ATOMIC);
rcu_read_unlock();
}
return 0;
}
struct ctrl_dump_policy_ctx {
struct netlink_policy_dump_state *state;
const struct genl_family *rt;
struct genl_op_iter *op_iter;
u32 op;
u16 fam_id;
u8 dump_map:1,
single_op:1;
};
static const struct nla_policy ctrl_policy_policy[] = {
[CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
[CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
.len = GENL_NAMSIZ - 1 },
[CTRL_ATTR_OP] = { .type = NLA_U32 },
};
static int ctrl_dumppolicy_start(struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
struct nlattr **tb = info->info.attrs;
const struct genl_family *rt;
struct genl_op_iter i;
int err;
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
return -EINVAL;
if (tb[CTRL_ATTR_FAMILY_ID]) {
ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
} else {
rt = genl_family_find_byname(
nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
if (!rt)
return -ENOENT;
ctx->fam_id = rt->id;
}
rt = genl_family_find_byid(ctx->fam_id);
if (!rt)
return -ENOENT;
ctx->rt = rt;
if (tb[CTRL_ATTR_OP]) {
struct genl_split_ops doit, dump;
ctx->single_op = true;
ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);
err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
if (err) {
NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
return err;
}
if (doit.policy) {
err = netlink_policy_dump_add_policy(&ctx->state,
doit.policy,
doit.maxattr);
if (err)
goto err_free_state;
}
if (dump.policy) {
err = netlink_policy_dump_add_policy(&ctx->state,
dump.policy,
dump.maxattr);
if (err)
goto err_free_state;
}
if (!ctx->state)
return -ENODATA;
ctx->dump_map = 1;
return 0;
}
ctx->op_iter = kmalloc(sizeof(*ctx->op_iter), GFP_KERNEL);
if (!ctx->op_iter)
return -ENOMEM;
genl_op_iter_init(rt, ctx->op_iter);
ctx->dump_map = genl_op_iter_next(ctx->op_iter);
for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
if (i.doit.policy) {
err = netlink_policy_dump_add_policy(&ctx->state,
i.doit.policy,
i.doit.maxattr);
if (err)
goto err_free_state;
}
if (i.dumpit.policy) {
err = netlink_policy_dump_add_policy(&ctx->state,
i.dumpit.policy,
i.dumpit.maxattr);
if (err)
goto err_free_state;
}
}
if (!ctx->state) {
err = -ENODATA;
goto err_free_op_iter;
}
return 0;
err_free_state:
netlink_policy_dump_free(ctx->state);
err_free_op_iter:
kfree(ctx->op_iter);
return err;
}
static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &genl_ctrl,
NLM_F_MULTI, CTRL_CMD_GETPOLICY);
if (!hdr)
return NULL;
if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
return NULL;
return hdr;
}
static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
struct netlink_callback *cb,
struct genl_split_ops *doit,
struct genl_split_ops *dumpit)
{
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
struct nlattr *nest_pol, *nest_op;
void *hdr;
int idx;
/* skip if we have nothing to show */
if (!doit->policy && !dumpit->policy)
return 0;
hdr = ctrl_dumppolicy_prep(skb, cb);
if (!hdr)
return -ENOBUFS;
nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
if (!nest_pol)
goto err;
nest_op = nla_nest_start(skb, doit->cmd);
if (!nest_op)
goto err;
if (doit->policy) {
idx = netlink_policy_dump_get_policy_idx(ctx->state,
doit->policy,
doit->maxattr);
if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
goto err;
}
if (dumpit->policy) {
idx = netlink_policy_dump_get_policy_idx(ctx->state,
dumpit->policy,
dumpit->maxattr);
if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
goto err;
}
nla_nest_end(skb, nest_op);
nla_nest_end(skb, nest_pol);
genlmsg_end(skb, hdr);
return 0;
err:
genlmsg_cancel(skb, hdr);
return -ENOBUFS;
}
static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
void *hdr;
if (ctx->dump_map) {
if (ctx->single_op) {
struct genl_split_ops doit, dumpit;
if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
&doit, &dumpit)))
return -ENOENT;
if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
return skb->len;
/* done with the per-op policy index list */
ctx->dump_map = 0;
}
while (ctx->dump_map) {
if (ctrl_dumppolicy_put_op(skb, cb,
&ctx->op_iter->doit,
&ctx->op_iter->dumpit))
return skb->len;
ctx->dump_map = genl_op_iter_next(ctx->op_iter);
}
}
while (netlink_policy_dump_loop(ctx->state)) {
struct nlattr *nest;
hdr = ctrl_dumppolicy_prep(skb, cb);
if (!hdr)
goto nla_put_failure;
nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
if (!nest)
goto nla_put_failure;
if (netlink_policy_dump_write(skb, ctx->state))
goto nla_put_failure;
nla_nest_end(skb, nest);
genlmsg_end(skb, hdr);
}
return skb->len;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return skb->len;
}
static int ctrl_dumppolicy_done(struct netlink_callback *cb)
{
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
kfree(ctx->op_iter);
netlink_policy_dump_free(ctx->state);
return 0;
}
static const struct genl_split_ops genl_ctrl_ops[] = {
{
.cmd = CTRL_CMD_GETFAMILY,
.validate = GENL_DONT_VALIDATE_STRICT,
.policy = ctrl_policy_family,
.maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
.doit = ctrl_getfamily,
.flags = GENL_CMD_CAP_DO,
},
{
.cmd = CTRL_CMD_GETFAMILY,
.validate = GENL_DONT_VALIDATE_DUMP,
.policy = ctrl_policy_family,
.maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
.dumpit = ctrl_dumpfamily,
.flags = GENL_CMD_CAP_DUMP,
},
{
.cmd = CTRL_CMD_GETPOLICY,
.policy = ctrl_policy_policy,
.maxattr = ARRAY_SIZE(ctrl_policy_policy) - 1,
.start = ctrl_dumppolicy_start,
.dumpit = ctrl_dumppolicy,
.done = ctrl_dumppolicy_done,
.flags = GENL_CMD_CAP_DUMP,
},
};
static const struct genl_multicast_group genl_ctrl_groups[] = {
{ .name = "notify", },
};
static struct genl_family genl_ctrl __ro_after_init = {
.module = THIS_MODULE,
.split_ops = genl_ctrl_ops,
.n_split_ops = ARRAY_SIZE(genl_ctrl_ops),
.resv_start_op = CTRL_CMD_GETPOLICY + 1,
.mcgrps = genl_ctrl_groups,
.n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
.id = GENL_ID_CTRL,
.name = "nlctrl",
.version = 0x2,
.netnsok = true,
};
static int genl_bind(struct net *net, int group)
{
const struct genl_family *family;
unsigned int id;
int ret = 0;
down_read(&cb_lock);
idr_for_each_entry(&genl_fam_idr, family, id) {
const struct genl_multicast_group *grp;
int i;
if (family->n_mcgrps == 0)
continue;
i = group - family->mcgrp_offset;
if (i < 0 || i >= family->n_mcgrps)
continue;
grp = &family->mcgrps[i];
if ((grp->flags & GENL_UNS_ADMIN_PERM) &&
!ns_capable(net->user_ns, CAP_NET_ADMIN))
ret = -EPERM;
break;
}
up_read(&cb_lock);
return ret;
}
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
.bind = genl_bind,
};
/* we'll bump the group number right afterwards */
net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
if (!net->genl_sock && net_eq(net, &init_net))
panic("GENL: Cannot initialize generic netlink\n");
if (!net->genl_sock)
return -ENOMEM;
return 0;
}
static void __net_exit genl_pernet_exit(struct net *net)
{
netlink_kernel_release(net->genl_sock);
net->genl_sock = NULL;
}
static struct pernet_operations genl_pernet_ops = {
.init = genl_pernet_init,
.exit = genl_pernet_exit,
};
static int __init genl_init(void)
{
int err;
err = genl_register_family(&genl_ctrl);
if (err < 0)
goto problem;
err = register_pernet_subsys(&genl_pernet_ops);
if (err)
goto problem;
return 0;
problem:
panic("GENL: Cannot register controller: %d\n", err);
}
core_initcall(genl_init);
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
gfp_t flags)
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
bool delivered = false;
int err;
for_each_net_rcu(net) {
if (prev) {
tmp = skb_clone(skb, flags);
if (!tmp) {
err = -ENOMEM;
goto error;
}
err = nlmsg_multicast(prev->genl_sock, tmp,
portid, group, flags);
if (!err)
delivered = true;
else if (err != -ESRCH)
goto error;
}
prev = net;
}
err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
if (!err)
delivered = true;
else if (err != -ESRCH)
return err;
return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
return err;
}
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
return genlmsg_mcast(skb, portid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
struct genl_info *info, u32 group, gfp_t flags)
{
struct net *net = genl_info_net(info);
struct sock *sk = net->genl_sock;
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return;
group = family->mcgrp_offset + group;
nlmsg_notify(sk, skb, info->snd_portid, group,
nlmsg_report(info->nlhdr), flags);
}
EXPORT_SYMBOL(genl_notify);
| linux-master | net/netlink/genetlink.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NETLINK Policy advertisement to userspace
*
* Authors: Johannes Berg <[email protected]>
*
* Copyright 2019 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <net/netlink.h>
#define INITIAL_POLICIES_ALLOC 10
struct netlink_policy_dump_state {
unsigned int policy_idx;
unsigned int attr_idx;
unsigned int n_alloc;
struct {
const struct nla_policy *policy;
unsigned int maxtype;
} policies[];
};
static int add_policy(struct netlink_policy_dump_state **statep,
const struct nla_policy *policy,
unsigned int maxtype)
{
struct netlink_policy_dump_state *state = *statep;
unsigned int n_alloc, i;
if (!policy || !maxtype)
return 0;
for (i = 0; i < state->n_alloc; i++) {
if (state->policies[i].policy == policy &&
state->policies[i].maxtype == maxtype)
return 0;
if (!state->policies[i].policy) {
state->policies[i].policy = policy;
state->policies[i].maxtype = maxtype;
return 0;
}
}
n_alloc = state->n_alloc + INITIAL_POLICIES_ALLOC;
state = krealloc(state, struct_size(state, policies, n_alloc),
GFP_KERNEL);
if (!state)
return -ENOMEM;
memset(&state->policies[state->n_alloc], 0,
flex_array_size(state, policies, n_alloc - state->n_alloc));
state->policies[state->n_alloc].policy = policy;
state->policies[state->n_alloc].maxtype = maxtype;
state->n_alloc = n_alloc;
*statep = state;
return 0;
}
/**
* netlink_policy_dump_get_policy_idx - retrieve policy index
* @state: the policy dump state
* @policy: the policy to find
* @maxtype: the policy's maxattr
*
* Returns: the index of the given policy in the dump state
*
* Call this to find a policy index when you've added multiple and e.g.
* need to tell userspace which command has which policy (by index).
*
* Note: this will WARN and return 0 if the policy isn't found, which
* means it wasn't added in the first place, which would be an
* internal consistency bug.
*/
int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
const struct nla_policy *policy,
unsigned int maxtype)
{
unsigned int i;
if (WARN_ON(!policy || !maxtype))
return 0;
for (i = 0; i < state->n_alloc; i++) {
if (state->policies[i].policy == policy &&
state->policies[i].maxtype == maxtype)
return i;
}
WARN_ON(1);
return 0;
}
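/*
* Usage sketch (illustrative only, mirroring ctrl_dumppolicy_put_op() in
* genetlink.c, with op and idx as in that function): once all policies are
* in @state, the index ties an op to its policy in the dump sent to
* userspace:
*
*	if (op->policy) {
*		idx = netlink_policy_dump_get_policy_idx(state, op->policy,
*							 op->maxattr);
*		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
*			return -EMSGSIZE;
*	}
*/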
static struct netlink_policy_dump_state *alloc_state(void)
{
struct netlink_policy_dump_state *state;
state = kzalloc(struct_size(state, policies, INITIAL_POLICIES_ALLOC),
GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
state->n_alloc = INITIAL_POLICIES_ALLOC;
return state;
}
/**
* netlink_policy_dump_add_policy - add a policy to the dump
* @pstate: state to add to, may be reallocated, must be %NULL the first time
* @policy: the new policy to add to the dump
* @maxtype: the new policy's max attr type
*
* Returns: 0 on success, a negative error code otherwise.
*
* Call this to allocate a policy dump state, and to add policies to it. This
* should be called from the dump start() callback.
*
* Note: on failures, any previously allocated state is freed.
*/
int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
const struct nla_policy *policy,
unsigned int maxtype)
{
struct netlink_policy_dump_state *state = *pstate;
unsigned int policy_idx;
int err;
if (!state) {
state = alloc_state();
if (IS_ERR(state))
return PTR_ERR(state);
}
/*
* walk the policies and nested ones first, and build
* a linear list of them.
*/
err = add_policy(&state, policy, maxtype);
if (err)
goto err_try_undo;
for (policy_idx = 0;
policy_idx < state->n_alloc && state->policies[policy_idx].policy;
policy_idx++) {
const struct nla_policy *policy;
unsigned int type;
policy = state->policies[policy_idx].policy;
for (type = 0;
type <= state->policies[policy_idx].maxtype;
type++) {
switch (policy[type].type) {
case NLA_NESTED:
case NLA_NESTED_ARRAY:
err = add_policy(&state,
policy[type].nested_policy,
policy[type].len);
if (err)
goto err_try_undo;
break;
default:
break;
}
}
}
*pstate = state;
return 0;
err_try_undo:
/* Try to preserve reasonable unwind semantics - if we're starting from
* scratch, clean up fully; otherwise record what we got and the caller
* will clean up.
*/
if (!*pstate)
netlink_policy_dump_free(state);
else
*pstate = state;
return err;
}
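/*
* Usage sketch (illustrative only; doit_policy, dumpit_policy and maxattr
* are hypothetical names): a family's dump start() callback typically
* builds the state from its per-op policies, e.g.
*
*	struct netlink_policy_dump_state *state = NULL;
*	int err;
*
*	err = netlink_policy_dump_add_policy(&state, doit_policy, maxattr);
*	if (err)
*		return err;
*	err = netlink_policy_dump_add_policy(&state, dumpit_policy, maxattr);
*	if (err)
*		return err;
*
* Nested policies referenced by the ones passed in are picked up
* automatically by the walk above.
*/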
static bool
netlink_policy_dump_finished(struct netlink_policy_dump_state *state)
{
return state->policy_idx >= state->n_alloc ||
!state->policies[state->policy_idx].policy;
}
/**
* netlink_policy_dump_loop - dumping loop indicator
* @state: the policy dump state
*
* Returns: %true if the dump continues, %false otherwise
*
* Note: this does not free the dump state; free it with
* netlink_policy_dump_free() from the done() callback when finished
*/
bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state)
{
return !netlink_policy_dump_finished(state);
}
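/*
* Usage sketch (illustrative only, simplified from ctrl_dumppolicy() in
* genetlink.c): the dumpit() callback keeps emitting one attribute policy
* per iteration until the state is exhausted:
*
*	while (netlink_policy_dump_loop(state)) {
*		struct nlattr *nest;
*
*		nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
*		if (!nest)
*			break;
*		if (netlink_policy_dump_write(skb, state))
*			break;
*		nla_nest_end(skb, nest);
*	}
*/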
int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt)
{
/* nested + type */
int common = 2 * nla_attr_size(sizeof(u32));
switch (pt->type) {
case NLA_UNSPEC:
case NLA_REJECT:
/* these actually don't need any space */
return 0;
case NLA_NESTED:
case NLA_NESTED_ARRAY:
/* common, policy idx, policy maxattr */
return common + 2 * nla_attr_size(sizeof(u32));
case NLA_U8:
case NLA_U16:
case NLA_U32:
case NLA_U64:
case NLA_MSECS:
case NLA_S8:
case NLA_S16:
case NLA_S32:
case NLA_S64:
/* maximum is common, u64 min/max with padding */
return common +
2 * (nla_attr_size(0) + nla_attr_size(sizeof(u64)));
case NLA_BITFIELD32:
return common + nla_attr_size(sizeof(u32));
case NLA_STRING:
case NLA_NUL_STRING:
case NLA_BINARY:
/* maximum is common, u32 min-length/max-length */
return common + 2 * nla_attr_size(sizeof(u32));
case NLA_FLAG:
return common;
}
/* this should then cause a warning later */
return 0;
}
static int
__netlink_policy_dump_write_attr(struct netlink_policy_dump_state *state,
struct sk_buff *skb,
const struct nla_policy *pt,
int nestattr)
{
int estimate = netlink_policy_dump_attr_size_estimate(pt);
enum netlink_attribute_type type;
struct nlattr *attr;
attr = nla_nest_start(skb, nestattr);
if (!attr)
return -ENOBUFS;
switch (pt->type) {
default:
case NLA_UNSPEC:
case NLA_REJECT:
/* skip - use NLA_MIN_LEN to advertise such */
nla_nest_cancel(skb, attr);
return -ENODATA;
case NLA_NESTED:
type = NL_ATTR_TYPE_NESTED;
fallthrough;
case NLA_NESTED_ARRAY:
if (pt->type == NLA_NESTED_ARRAY)
type = NL_ATTR_TYPE_NESTED_ARRAY;
if (state && pt->nested_policy && pt->len &&
(nla_put_u32(skb, NL_POLICY_TYPE_ATTR_POLICY_IDX,
netlink_policy_dump_get_policy_idx(state,
pt->nested_policy,
pt->len)) ||
nla_put_u32(skb, NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE,
pt->len)))
goto nla_put_failure;
break;
case NLA_U8:
case NLA_U16:
case NLA_U32:
case NLA_U64:
case NLA_MSECS: {
struct netlink_range_validation range;
if (pt->type == NLA_U8)
type = NL_ATTR_TYPE_U8;
else if (pt->type == NLA_U16)
type = NL_ATTR_TYPE_U16;
else if (pt->type == NLA_U32)
type = NL_ATTR_TYPE_U32;
else
type = NL_ATTR_TYPE_U64;
if (pt->validation_type == NLA_VALIDATE_MASK) {
if (nla_put_u64_64bit(skb, NL_POLICY_TYPE_ATTR_MASK,
pt->mask,
NL_POLICY_TYPE_ATTR_PAD))
goto nla_put_failure;
break;
}
nla_get_range_unsigned(pt, &range);
if (nla_put_u64_64bit(skb, NL_POLICY_TYPE_ATTR_MIN_VALUE_U,
range.min, NL_POLICY_TYPE_ATTR_PAD) ||
nla_put_u64_64bit(skb, NL_POLICY_TYPE_ATTR_MAX_VALUE_U,
range.max, NL_POLICY_TYPE_ATTR_PAD))
goto nla_put_failure;
break;
}
case NLA_S8:
case NLA_S16:
case NLA_S32:
case NLA_S64: {
struct netlink_range_validation_signed range;
if (pt->type == NLA_S8)
type = NL_ATTR_TYPE_S8;
else if (pt->type == NLA_S16)
type = NL_ATTR_TYPE_S16;
else if (pt->type == NLA_S32)
type = NL_ATTR_TYPE_S32;
else
type = NL_ATTR_TYPE_S64;
nla_get_range_signed(pt, &range);
if (nla_put_s64(skb, NL_POLICY_TYPE_ATTR_MIN_VALUE_S,
range.min, NL_POLICY_TYPE_ATTR_PAD) ||
nla_put_s64(skb, NL_POLICY_TYPE_ATTR_MAX_VALUE_S,
range.max, NL_POLICY_TYPE_ATTR_PAD))
goto nla_put_failure;
break;
}
case NLA_BITFIELD32:
type = NL_ATTR_TYPE_BITFIELD32;
if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_BITFIELD32_MASK,
pt->bitfield32_valid))
goto nla_put_failure;
break;
case NLA_STRING:
case NLA_NUL_STRING:
case NLA_BINARY:
if (pt->type == NLA_STRING)
type = NL_ATTR_TYPE_STRING;
else if (pt->type == NLA_NUL_STRING)
type = NL_ATTR_TYPE_NUL_STRING;
else
type = NL_ATTR_TYPE_BINARY;
if (pt->validation_type == NLA_VALIDATE_RANGE ||
pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG) {
struct netlink_range_validation range;
nla_get_range_unsigned(pt, &range);
if (range.min &&
nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH,
range.min))
goto nla_put_failure;
if (range.max < U16_MAX &&
nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH,
range.max))
goto nla_put_failure;
} else if (pt->len &&
nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH,
pt->len)) {
goto nla_put_failure;
}
break;
case NLA_FLAG:
type = NL_ATTR_TYPE_FLAG;
break;
}
if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_TYPE, type))
goto nla_put_failure;
nla_nest_end(skb, attr);
WARN_ON(attr->nla_len > estimate);
return 0;
nla_put_failure:
nla_nest_cancel(skb, attr);
return -ENOBUFS;
}
/**
* netlink_policy_dump_write_attr - write a given attribute policy
* @skb: the message skb to write to
* @pt: the attribute's policy
* @nestattr: the nested attribute ID to use
*
* Returns: 0 on success, an error code otherwise; -%ENODATA is
* special, indicating that there's no policy data and
* the attribute is generally rejected.
*/
int netlink_policy_dump_write_attr(struct sk_buff *skb,
const struct nla_policy *pt,
int nestattr)
{
return __netlink_policy_dump_write_attr(NULL, skb, pt, nestattr);
}
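/*
* Usage sketch (illustrative only; policy and type are hypothetical): a
* caller describing a single attribute treats -ENODATA as "nothing to
* advertise" rather than as a failure:
*
*	err = netlink_policy_dump_write_attr(skb, &policy[type], type);
*	if (err == -ENODATA)
*		return 0;
*	if (err)
*		return err;
*/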
/**
* netlink_policy_dump_write - write current policy dump attributes
* @skb: the message skb to write to
* @state: the policy dump state
*
* Returns: 0 on success, an error code otherwise
*/
int netlink_policy_dump_write(struct sk_buff *skb,
struct netlink_policy_dump_state *state)
{
const struct nla_policy *pt;
struct nlattr *policy;
bool again;
int err;
send_attribute:
again = false;
pt = &state->policies[state->policy_idx].policy[state->attr_idx];
policy = nla_nest_start(skb, state->policy_idx);
if (!policy)
return -ENOBUFS;
err = __netlink_policy_dump_write_attr(state, skb, pt, state->attr_idx);
if (err == -ENODATA) {
nla_nest_cancel(skb, policy);
again = true;
goto next;
} else if (err) {
goto nla_put_failure;
}
/* finish and move state to next attribute */
nla_nest_end(skb, policy);
next:
state->attr_idx += 1;
if (state->attr_idx > state->policies[state->policy_idx].maxtype) {
state->attr_idx = 0;
state->policy_idx++;
}
if (again) {
if (netlink_policy_dump_finished(state))
return -ENODATA;
goto send_attribute;
}
return 0;
nla_put_failure:
nla_nest_cancel(skb, policy);
return -ENOBUFS;
}
/**
* netlink_policy_dump_free - free policy dump state
* @state: the policy dump state to free
*
* Call this from the done() method to ensure dump state is freed.
*/
void netlink_policy_dump_free(struct netlink_policy_dump_state *state)
{
kfree(state);
}
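/*
* Usage sketch (illustrative only, mirroring ctrl_dumppolicy_done() in
* genetlink.c; my_dumppolicy_done() and my_ctx() are hypothetical names):
*
*	static int my_dumppolicy_done(struct netlink_callback *cb)
*	{
*		netlink_policy_dump_free(my_ctx(cb)->state);
*		return 0;
*	}
*/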
| linux-master | net/netlink/policy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NETLINK Kernel-user communication protocol.
*
* Authors: Alan Cox <[email protected]>
* Alexey Kuznetsov <[email protected]>
* Patrick McHardy <[email protected]>
*
* Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
* added netlink_proto_exit
* Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <[email protected]>
* use nlk_sk, as sk->protinfo is on a diet 8)
* Fri Jul 22 19:51:12 MEST 2005 Harald Welte <[email protected]>
* - inc module use count of module that owns
* the kernel socket in case userspace opens
* socket of same protocol
* - remove all module support, since netlink is
* mandatory if CONFIG_NET=y these days
*/
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>
#include <linux/net_namespace.h>
#include <linux/nospec.h>
#include <linux/btf_ids.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define CREATE_TRACE_POINTS
#include <trace/events/netlink.h>
#include "af_netlink.h"
struct listeners {
struct rcu_head rcu;
unsigned long masks[];
};
/* state bits */
#define NETLINK_S_CONGESTED 0x0
static inline int netlink_is_kernel(struct sock *sk)
{
return nlk_test_bit(KERNEL_SOCKET, sk);
}
struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
"nlk_cb_mutex-ROUTE",
"nlk_cb_mutex-1",
"nlk_cb_mutex-USERSOCK",
"nlk_cb_mutex-FIREWALL",
"nlk_cb_mutex-SOCK_DIAG",
"nlk_cb_mutex-NFLOG",
"nlk_cb_mutex-XFRM",
"nlk_cb_mutex-SELINUX",
"nlk_cb_mutex-ISCSI",
"nlk_cb_mutex-AUDIT",
"nlk_cb_mutex-FIB_LOOKUP",
"nlk_cb_mutex-CONNECTOR",
"nlk_cb_mutex-NETFILTER",
"nlk_cb_mutex-IP6_FW",
"nlk_cb_mutex-DNRTMSG",
"nlk_cb_mutex-KOBJECT_UEVENT",
"nlk_cb_mutex-GENERIC",
"nlk_cb_mutex-17",
"nlk_cb_mutex-SCSITRANSPORT",
"nlk_cb_mutex-ECRYPTFS",
"nlk_cb_mutex-RDMA",
"nlk_cb_mutex-CRYPTO",
"nlk_cb_mutex-SMC",
"nlk_cb_mutex-23",
"nlk_cb_mutex-24",
"nlk_cb_mutex-25",
"nlk_cb_mutex-26",
"nlk_cb_mutex-27",
"nlk_cb_mutex-28",
"nlk_cb_mutex-29",
"nlk_cb_mutex-30",
"nlk_cb_mutex-31",
"nlk_cb_mutex-MAX_LINKS"
};
static int netlink_dump(struct sock *sk);
/* nl_table locking explained:
* Lookup and traversal are protected with an RCU read-side lock. Insertion
* and removal are protected with per bucket lock while using RCU list
* modification primitives and may run in parallel to RCU protected lookups.
* Destruction of the Netlink socket may only occur *after* nl_table_lock has
* been acquired, either during or after the socket has been removed from
* the list and after an RCU grace period.
*/
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
static BLOCKING_NOTIFIER_HEAD(netlink_chain);
static const struct rhashtable_params netlink_rhashtable_params;
void do_trace_netlink_extack(const char *msg)
{
trace_netlink_extack(msg);
}
EXPORT_SYMBOL(do_trace_netlink_extack);
static inline u32 netlink_group_mask(u32 group)
{
if (group > 32)
return 0;
return group ? 1 << (group - 1) : 0;
}
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
gfp_t gfp_mask)
{
unsigned int len = skb_end_offset(skb);
struct sk_buff *new;
new = alloc_skb(len, gfp_mask);
if (new == NULL)
return NULL;
NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
skb_put_data(new, skb->data, len);
return new;
}
static unsigned int netlink_tap_net_id;
struct netlink_tap_net {
struct list_head netlink_tap_all;
struct mutex netlink_tap_lock;
};
int netlink_add_tap(struct netlink_tap *nt)
{
struct net *net = dev_net(nt->dev);
struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
if (unlikely(nt->dev->type != ARPHRD_NETLINK))
return -EINVAL;
mutex_lock(&nn->netlink_tap_lock);
list_add_rcu(&nt->list, &nn->netlink_tap_all);
mutex_unlock(&nn->netlink_tap_lock);
__module_get(nt->module);
return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);
static int __netlink_remove_tap(struct netlink_tap *nt)
{
struct net *net = dev_net(nt->dev);
struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
bool found = false;
struct netlink_tap *tmp;
mutex_lock(&nn->netlink_tap_lock);
list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
if (nt == tmp) {
list_del_rcu(&nt->list);
found = true;
goto out;
}
}
pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
mutex_unlock(&nn->netlink_tap_lock);
if (found)
module_put(nt->module);
return found ? 0 : -ENODEV;
}
int netlink_remove_tap(struct netlink_tap *nt)
{
int ret;
ret = __netlink_remove_tap(nt);
synchronize_net();
return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
static __net_init int netlink_tap_init_net(struct net *net)
{
struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
INIT_LIST_HEAD(&nn->netlink_tap_all);
mutex_init(&nn->netlink_tap_lock);
return 0;
}
static struct pernet_operations netlink_tap_net_ops = {
.init = netlink_tap_init_net,
.id = &netlink_tap_net_id,
.size = sizeof(struct netlink_tap_net),
};
static bool netlink_filter_tap(const struct sk_buff *skb)
{
struct sock *sk = skb->sk;
/* We take the more conservative approach and
* whitelist socket protocols that may pass.
*/
switch (sk->sk_protocol) {
case NETLINK_ROUTE:
case NETLINK_USERSOCK:
case NETLINK_SOCK_DIAG:
case NETLINK_NFLOG:
case NETLINK_XFRM:
case NETLINK_FIB_LOOKUP:
case NETLINK_NETFILTER:
case NETLINK_GENERIC:
return true;
}
return false;
}
static int __netlink_deliver_tap_skb(struct sk_buff *skb,
struct net_device *dev)
{
struct sk_buff *nskb;
struct sock *sk = skb->sk;
int ret = -ENOMEM;
if (!net_eq(dev_net(dev), sock_net(sk)))
return 0;
dev_hold(dev);
if (is_vmalloc_addr(skb->head))
nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
else
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
nskb->dev = dev;
nskb->protocol = htons((u16) sk->sk_protocol);
nskb->pkt_type = netlink_is_kernel(sk) ?
PACKET_KERNEL : PACKET_USER;
skb_reset_network_header(nskb);
ret = dev_queue_xmit(nskb);
if (unlikely(ret > 0))
ret = net_xmit_errno(ret);
}
dev_put(dev);
return ret;
}
static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
{
int ret;
struct netlink_tap *tmp;
if (!netlink_filter_tap(skb))
return;
list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
ret = __netlink_deliver_tap_skb(skb, tmp->dev);
if (unlikely(ret))
break;
}
}
static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
{
struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
rcu_read_lock();
if (unlikely(!list_empty(&nn->netlink_tap_all)))
__netlink_deliver_tap(skb, nn);
rcu_read_unlock();
}
static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
struct sk_buff *skb)
{
if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
netlink_deliver_tap(sock_net(dst), skb);
}
static void netlink_overrun(struct sock *sk)
{
if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
if (!test_and_set_bit(NETLINK_S_CONGESTED,
&nlk_sk(sk)->state)) {
sk->sk_err = ENOBUFS;
sk_error_report(sk);
}
}
atomic_inc(&sk->sk_drops);
}
static void netlink_rcv_wake(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (skb_queue_empty_lockless(&sk->sk_receive_queue))
clear_bit(NETLINK_S_CONGESTED, &nlk->state);
if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
wake_up_interruptible(&nlk->wait);
}
static void netlink_skb_destructor(struct sk_buff *skb)
{
if (is_vmalloc_addr(skb->head)) {
if (!skb->cloned ||
!atomic_dec_return(&(skb_shinfo(skb)->dataref)))
vfree(skb->head);
skb->head = NULL;
}
if (skb->sk != NULL)
sock_rfree(skb);
}
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
WARN_ON(skb->sk != NULL);
skb->sk = sk;
skb->destructor = netlink_skb_destructor;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_charge(sk, skb->truesize);
}
static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (nlk->cb_running) {
if (nlk->cb.done)
nlk->cb.done(&nlk->cb);
module_put(nlk->cb.module);
kfree_skb(nlk->cb.skb);
}
skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
WARN_ON(nlk_sk(sk)->groups);
}
static void netlink_sock_destruct_work(struct work_struct *work)
{
struct netlink_sock *nlk = container_of(work, struct netlink_sock,
work);
sk_free(&nlk->sk);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
* SMP. Look, when several writers sleep and reader wakes them up, all but one
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
* this, _but_ remember, it adds useless work on UP machines.
*/
void netlink_table_grab(void)
__acquires(nl_table_lock)
{
might_sleep();
write_lock_irq(&nl_table_lock);
if (atomic_read(&nl_table_users)) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&nl_table_wait, &wait);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&nl_table_users) == 0)
break;
write_unlock_irq(&nl_table_lock);
schedule();
write_lock_irq(&nl_table_lock);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nl_table_wait, &wait);
}
}
void netlink_table_ungrab(void)
__releases(nl_table_lock)
{
write_unlock_irq(&nl_table_lock);
wake_up(&nl_table_wait);
}
static inline void
netlink_lock_table(void)
{
unsigned long flags;
/* read_lock() synchronizes us to netlink_table_grab */
read_lock_irqsave(&nl_table_lock, flags);
atomic_inc(&nl_table_users);
read_unlock_irqrestore(&nl_table_lock, flags);
}
static inline void
netlink_unlock_table(void)
{
if (atomic_dec_and_test(&nl_table_users))
wake_up(&nl_table_wait);
}
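/*
* Locking sketch (editorial, not part of the original source): readers that
* only look at nl_table[] nest inside netlink_lock_table()/
* netlink_unlock_table(), while writers use netlink_table_grab()/
* netlink_table_ungrab(), which waits until all such readers have drained:
*
*	netlink_lock_table();
*	(read nl_table[protocol] fields)
*	netlink_unlock_table();
*
*	netlink_table_grab();
*	(modify nl_table[], mc_list or listeners)
*	netlink_table_ungrab();
*/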
struct netlink_compare_arg
{
possible_net_t pnet;
u32 portid;
};
/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct netlink_compare_arg *x = arg->key;
const struct netlink_sock *nlk = ptr;
return nlk->portid != x->portid ||
!net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}
static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
struct net *net, u32 portid)
{
memset(arg, 0, sizeof(*arg));
write_pnet(&arg->pnet, net);
arg->portid = portid;
}
static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
struct net *net)
{
struct netlink_compare_arg arg;
netlink_compare_arg_init(&arg, net, portid);
return rhashtable_lookup_fast(&table->hash, &arg,
netlink_rhashtable_params);
}
static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
struct netlink_compare_arg arg;
netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
return rhashtable_lookup_insert_key(&table->hash, &arg,
&nlk_sk(sk)->node,
netlink_rhashtable_params);
}
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
struct netlink_table *table = &nl_table[protocol];
struct sock *sk;
rcu_read_lock();
sk = __netlink_lookup(table, portid, net);
if (sk)
sock_hold(sk);
rcu_read_unlock();
return sk;
}
static const struct proto_ops netlink_ops;
static void
netlink_update_listeners(struct sock *sk)
{
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
unsigned long mask;
unsigned int i;
struct listeners *listeners;
listeners = nl_deref_protected(tbl->listeners);
if (!listeners)
return;
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
sk_for_each_bound(sk, &tbl->mc_list) {
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, u32 portid)
{
struct netlink_table *table = &nl_table[sk->sk_protocol];
int err;
lock_sock(sk);
err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
if (nlk_sk(sk)->bound)
goto err;
/* portid can be read locklessly from netlink_getname(). */
WRITE_ONCE(nlk_sk(sk)->portid, portid);
sock_hold(sk);
err = __netlink_insert(table, sk);
if (err) {
/* In case the hashtable backend returns with -EBUSY
* from here, it must not escape to the caller.
*/
if (unlikely(err == -EBUSY))
err = -EOVERFLOW;
if (err == -EEXIST)
err = -EADDRINUSE;
sock_put(sk);
goto err;
}
/* We need to ensure that the socket is hashed and visible. */
smp_wmb();
/* Paired with lockless reads from netlink_bind(),
* netlink_connect() and netlink_sendmsg().
*/
WRITE_ONCE(nlk_sk(sk)->bound, portid);
err:
release_sock(sk);
return err;
}
static void netlink_remove(struct sock *sk)
{
struct netlink_table *table;
table = &nl_table[sk->sk_protocol];
if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
netlink_rhashtable_params)) {
WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
netlink_table_grab();
if (nlk_sk(sk)->subscriptions) {
__sk_del_bind_node(sk);
netlink_update_listeners(sk);
}
if (sk->sk_protocol == NETLINK_GENERIC)
atomic_inc(&genl_sk_destructing_cnt);
netlink_table_ungrab();
}
static struct proto netlink_proto = {
.name = "NETLINK",
.owner = THIS_MODULE,
.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
struct mutex *cb_mutex, int protocol,
int kern)
{
struct sock *sk;
struct netlink_sock *nlk;
sock->ops = &netlink_ops;
sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
nlk = nlk_sk(sk);
if (cb_mutex) {
nlk->cb_mutex = cb_mutex;
} else {
nlk->cb_mutex = &nlk->cb_def_mutex;
mutex_init(nlk->cb_mutex);
lockdep_set_class_and_name(nlk->cb_mutex,
nlk_cb_mutex_keys + protocol,
nlk_cb_mutex_key_strings[protocol]);
}
init_waitqueue_head(&nlk->wait);
sk->sk_destruct = netlink_sock_destruct;
sk->sk_protocol = protocol;
return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
void (*release)(struct sock *sock, unsigned long *groups);
int err = 0;
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
if (protocol < 0 || protocol >= MAX_LINKS)
return -EPROTONOSUPPORT;
protocol = array_index_nospec(protocol, MAX_LINKS);
netlink_lock_table();
#ifdef CONFIG_MODULES
if (!nl_table[protocol].registered) {
netlink_unlock_table();
request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
netlink_lock_table();
}
#endif
if (nl_table[protocol].registered &&
try_module_get(nl_table[protocol].module))
module = nl_table[protocol].module;
else
err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
bind = nl_table[protocol].bind;
unbind = nl_table[protocol].unbind;
release = nl_table[protocol].release;
netlink_unlock_table();
if (err < 0)
goto out;
err = __netlink_create(net, sock, cb_mutex, protocol, kern);
if (err < 0)
goto out_module;
sock_prot_inuse_add(net, &netlink_proto, 1);
nlk = nlk_sk(sock->sk);
nlk->module = module;
nlk->netlink_bind = bind;
nlk->netlink_unbind = unbind;
nlk->netlink_release = release;
out:
return err;
out_module:
module_put(module);
goto out;
}
static void deferred_put_nlk_sk(struct rcu_head *head)
{
struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
struct sock *sk = &nlk->sk;
kfree(nlk->groups);
nlk->groups = NULL;
if (!refcount_dec_and_test(&sk->sk_refcnt))
return;
if (nlk->cb_running && nlk->cb.done) {
INIT_WORK(&nlk->work, netlink_sock_destruct_work);
schedule_work(&nlk->work);
return;
}
sk_free(sk);
}
static int netlink_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk;
if (!sk)
return 0;
netlink_remove(sk);
sock_orphan(sk);
nlk = nlk_sk(sk);
/*
* OK. Socket is unlinked, any packets that arrive now
* will be purged.
*/
if (nlk->netlink_release)
nlk->netlink_release(sk, nlk->groups);
/* must not acquire netlink_table_lock in any way again before unbind
* and notifying genetlink is done as otherwise it might deadlock
*/
if (nlk->netlink_unbind) {
int i;
for (i = 0; i < nlk->ngroups; i++)
if (test_bit(i, nlk->groups))
nlk->netlink_unbind(sock_net(sk), i + 1);
}
if (sk->sk_protocol == NETLINK_GENERIC &&
atomic_dec_return(&genl_sk_destructing_cnt) == 0)
wake_up(&genl_sk_destructing_waitq);
sock->sk = NULL;
wake_up_interruptible_all(&nlk->wait);
skb_queue_purge(&sk->sk_write_queue);
if (nlk->portid && nlk->bound) {
struct netlink_notify n = {
.net = sock_net(sk),
.protocol = sk->sk_protocol,
.portid = nlk->portid,
};
blocking_notifier_call_chain(&netlink_chain,
NETLINK_URELEASE, &n);
}
module_put(nlk->module);
if (netlink_is_kernel(sk)) {
netlink_table_grab();
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
struct listeners *old;
old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
kfree_rcu(old, rcu);
nl_table[sk->sk_protocol].module = NULL;
nl_table[sk->sk_protocol].bind = NULL;
nl_table[sk->sk_protocol].unbind = NULL;
nl_table[sk->sk_protocol].flags = 0;
nl_table[sk->sk_protocol].registered = 0;
}
netlink_table_ungrab();
}
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
/* Because struct net might disappear soon, do not keep a pointer. */
if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
__netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
/* Because of deferred_put_nlk_sk and use of work queue,
* it is possible netns will be freed before this socket.
*/
sock_net_set(sk, &init_net);
__netns_tracker_alloc(&init_net, &sk->ns_tracker,
false, GFP_KERNEL);
}
call_rcu(&nlk->rcu, deferred_put_nlk_sk);
return 0;
}
static int netlink_autobind(struct socket *sock)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct netlink_table *table = &nl_table[sk->sk_protocol];
s32 portid = task_tgid_vnr(current);
int err;
s32 rover = -4096;
bool ok;
retry:
cond_resched();
rcu_read_lock();
ok = !__netlink_lookup(table, portid, net);
rcu_read_unlock();
if (!ok) {
/* Bind collision, search negative portid values. */
if (rover == -4096)
/* rover will be in range [S32_MIN, -4097] */
rover = S32_MIN + get_random_u32_below(-4096 - S32_MIN);
else if (rover >= -4096)
rover = -4097;
portid = rover--;
goto retry;
}
err = netlink_insert(sk, portid);
if (err == -EADDRINUSE)
goto retry;
/* If 2 threads race to autobind, that is fine. */
if (err == -EBUSY)
err = 0;
return err;
}
/**
* __netlink_ns_capable - General netlink message capability test
* @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
* @user_ns: The user namespace of the capability to use
* @cap: The capability to use
*
* Test to see if the opener of the socket we received the message
* from had the capability @cap when the netlink socket was created
* and if the sender of the message has the capability @cap in the
* user namespace @user_ns.
*/
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
struct user_namespace *user_ns, int cap)
{
return ((nsp->flags & NETLINK_SKB_DST) ||
file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);
/**
* netlink_ns_capable - General netlink message capability test
* @skb: socket buffer holding a netlink command from userspace
* @user_ns: The user namespace of the capability to use
* @cap: The capability to use
*
* Test to see if the opener of the socket we received the message
* from had the capability @cap when the netlink socket was created
* and if the sender of the message has the capability @cap in the
* user namespace @user_ns.
*/
bool netlink_ns_capable(const struct sk_buff *skb,
struct user_namespace *user_ns, int cap)
{
return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);
/**
* netlink_capable - Netlink global message capability test
* @skb: socket buffer holding a netlink command from userspace
* @cap: The capability to use
*
* Test to see if the opener of the socket we received the message
* from had the capability @cap when the netlink socket was created
* and if the sender of the message has the capability @cap in all
* user namespaces.
*/
bool netlink_capable(const struct sk_buff *skb, int cap)
{
return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);
/**
* netlink_net_capable - Netlink network namespace message capability test
* @skb: socket buffer holding a netlink command from userspace
* @cap: The capability to use
*
* Test to see if the opener of the socket we received the message
* from had the capability @cap when the netlink socket was created
* and if the sender of the message has the capability @cap over the
* network namespace of the socket we received the message from.
*/
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
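/*
* Usage sketch (illustrative only, mirroring the checks in
* genl_family_rcv_msg() in genetlink.c): message handlers gate privileged
* commands on the sender's capabilities, either globally or per network
* namespace:
*
*	if ((op.flags & GENL_ADMIN_PERM) &&
*	    !netlink_capable(skb, CAP_NET_ADMIN))
*		return -EPERM;
*
*	if ((op.flags & GENL_UNS_ADMIN_PERM) &&
*	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
*		return -EPERM;
*/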
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
return (nl_table[sock->sk->sk_protocol].flags & flag) ||
ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}
static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (nlk->subscriptions && !subscriptions)
__sk_del_bind_node(sk);
else if (!nlk->subscriptions && subscriptions)
sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
unsigned int groups;
unsigned long *new_groups;
int err = 0;
netlink_table_grab();
groups = nl_table[sk->sk_protocol].groups;
if (!nl_table[sk->sk_protocol].registered) {
err = -ENOENT;
goto out_unlock;
}
if (nlk->ngroups >= groups)
goto out_unlock;
new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
if (new_groups == NULL) {
err = -ENOMEM;
goto out_unlock;
}
memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
nlk->groups = new_groups;
nlk->ngroups = groups;
out_unlock:
netlink_table_ungrab();
return err;
}
static void netlink_undo_bind(int group, long unsigned int groups,
struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
int undo;
if (!nlk->netlink_unbind)
return;
for (undo = 0; undo < group; undo++)
if (test_bit(undo, &groups))
nlk->netlink_unbind(sock_net(sk), undo + 1);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
int err = 0;
unsigned long groups;
bool bound;
if (addr_len < sizeof(struct sockaddr_nl))
return -EINVAL;
if (nladdr->nl_family != AF_NETLINK)
return -EINVAL;
groups = nladdr->nl_groups;
/* Only superuser is allowed to listen to multicasts */
if (groups) {
if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
return -EPERM;
err = netlink_realloc_groups(sk);
if (err)
return err;
}
if (nlk->ngroups < BITS_PER_LONG)
groups &= (1UL << nlk->ngroups) - 1;
/* Paired with WRITE_ONCE() in netlink_insert() */
bound = READ_ONCE(nlk->bound);
if (bound) {
/* Ensure nlk->portid is up-to-date. */
smp_rmb();
if (nladdr->nl_pid != nlk->portid)
return -EINVAL;
}
if (nlk->netlink_bind && groups) {
int group;
/* nl_groups is a u32, so cap the maximum groups we can bind */
for (group = 0; group < BITS_PER_TYPE(u32); group++) {
if (!test_bit(group, &groups))
continue;
err = nlk->netlink_bind(net, group + 1);
if (!err)
continue;
netlink_undo_bind(group, groups, sk);
return err;
}
}
/* No need for barriers here as we return to user-space without
* using any of the bound attributes.
*/
netlink_lock_table();
if (!bound) {
err = nladdr->nl_pid ?
netlink_insert(sk, nladdr->nl_pid) :
netlink_autobind(sock);
if (err) {
netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
goto unlock;
}
}
if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
goto unlock;
netlink_unlock_table();
netlink_table_grab();
netlink_update_subscriptions(sk, nlk->subscriptions +
hweight32(groups) -
hweight32(nlk->groups[0]));
nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
netlink_update_listeners(sk);
netlink_table_ungrab();
return 0;
unlock:
netlink_unlock_table();
return err;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
int err = 0;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
if (alen < sizeof(addr->sa_family))
return -EINVAL;
if (addr->sa_family == AF_UNSPEC) {
/* paired with READ_ONCE() in netlink_getsockbyportid() */
WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
/* dst_portid and dst_group can be read locklessly */
WRITE_ONCE(nlk->dst_portid, 0);
WRITE_ONCE(nlk->dst_group, 0);
return 0;
}
if (addr->sa_family != AF_NETLINK)
return -EINVAL;
if (alen < sizeof(struct sockaddr_nl))
return -EINVAL;
if ((nladdr->nl_groups || nladdr->nl_pid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
/* No need for barriers here as we return to user-space without
* using any of the bound attributes.
* Paired with WRITE_ONCE() in netlink_insert().
*/
if (!READ_ONCE(nlk->bound))
err = netlink_autobind(sock);
if (err == 0) {
/* paired with READ_ONCE() in netlink_getsockbyportid() */
WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
/* dst_portid and dst_group can be read locklessly */
WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
}
return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
int peer)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
nladdr->nl_family = AF_NETLINK;
nladdr->nl_pad = 0;
if (peer) {
/* Paired with WRITE_ONCE() in netlink_connect() */
nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
} else {
/* Paired with WRITE_ONCE() in netlink_insert() */
nladdr->nl_pid = READ_ONCE(nlk->portid);
netlink_lock_table();
nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
netlink_unlock_table();
}
return sizeof(*nladdr);
}
static int netlink_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
/* try to hand this ioctl down to the NIC drivers.
*/
return -ENOIOCTLCMD;
}
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
struct sock *sock;
struct netlink_sock *nlk;
sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
if (!sock)
return ERR_PTR(-ECONNREFUSED);
/* Don't bother queuing skb if kernel socket has no input function */
nlk = nlk_sk(sock);
/* dst_portid and sk_state can be changed in netlink_connect() */
if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
sock_put(sock);
return ERR_PTR(-ECONNREFUSED);
}
return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct sock *sock;
if (!S_ISSOCK(inode->i_mode))
return ERR_PTR(-ENOTSOCK);
sock = SOCKET_I(inode)->sk;
if (sock->sk_family != AF_NETLINK)
return ERR_PTR(-EINVAL);
sock_hold(sock);
return sock;
}
static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
int broadcast)
{
struct sk_buff *skb;
void *data;
if (size <= NLMSG_GOODSIZE || broadcast)
return alloc_skb(size, GFP_KERNEL);
size = SKB_DATA_ALIGN(size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = vmalloc(size);
if (data == NULL)
return NULL;
skb = __build_skb(data, size);
if (skb == NULL)
vfree(data);
else
skb->destructor = netlink_skb_destructor;
return skb;
}
/*
* Attach a skb to a netlink socket.
* The caller must hold a reference to the destination socket. On error, the
* reference is dropped. The skb is not sent to the destination; only the
* error checks are performed and memory in the queue is reserved.
* Return values:
* < 0: error. skb freed, reference to sock dropped.
* 0: continue
* 1: repeat lookup - reference dropped while waiting for socket memory.
*/
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk)
{
struct netlink_sock *nlk;
nlk = nlk_sk(sk);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
DECLARE_WAITQUEUE(wait, current);
if (!*timeo) {
if (!ssk || netlink_is_kernel(ssk))
netlink_overrun(sk);
sock_put(sk);
kfree_skb(skb);
return -EAGAIN;
}
__set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&nlk->wait, &wait);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
*timeo = schedule_timeout(*timeo);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nlk->wait, &wait);
sock_put(sk);
if (signal_pending(current)) {
kfree_skb(skb);
return sock_intr_errno(*timeo);
}
return 1;
}
netlink_skb_set_owner_r(skb, sk);
return 0;
}
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
int len = skb->len;
netlink_deliver_tap(sock_net(sk), skb);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
return len;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
int len = __netlink_sendskb(sk, skb);
sock_put(sk);
return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
kfree_skb(skb);
sock_put(sk);
}
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
int delta;
WARN_ON(skb->sk != NULL);
delta = skb->end - skb->tail;
if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
return skb;
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, allocation);
if (!nskb)
return skb;
consume_skb(skb);
skb = nskb;
}
pskb_expand_head(skb, 0, -delta,
(allocation & ~__GFP_DIRECT_RECLAIM) |
__GFP_NOWARN | __GFP_NORETRY);
return skb;
}
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
struct sock *ssk)
{
int ret;
struct netlink_sock *nlk = nlk_sk(sk);
ret = -ECONNREFUSED;
if (nlk->netlink_rcv != NULL) {
ret = skb->len;
netlink_skb_set_owner_r(skb, sk);
NETLINK_CB(skb).sk = ssk;
netlink_deliver_tap_kernel(sk, ssk, skb);
nlk->netlink_rcv(skb);
consume_skb(skb);
} else {
kfree_skb(skb);
}
sock_put(sk);
return ret;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
u32 portid, int nonblock)
{
struct sock *sk;
int err;
long timeo;
skb = netlink_trim(skb, gfp_any());
timeo = sock_sndtimeo(ssk, nonblock);
retry:
sk = netlink_getsockbyportid(ssk, portid);
if (IS_ERR(sk)) {
kfree_skb(skb);
return PTR_ERR(sk);
}
if (netlink_is_kernel(sk))
return netlink_unicast_kernel(sk, skb, ssk);
if (sk_filter(sk, skb)) {
err = skb->len;
kfree_skb(skb);
sock_put(sk);
return err;
}
err = netlink_attachskb(sk, skb, &timeo, ssk);
if (err == 1)
goto retry;
if (err)
return err;
return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
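/*
 * Illustrative sketch, not part of af_netlink.c: how a kernel-side caller
 * typically builds a reply and hands it to netlink_unicast().  MY_MSG_TYPE
 * and the u32 payload are hypothetical placeholders; only the nlmsg_*() and
 * netlink_unicast() calls are the real APIs used here.
 */
#if 0
static int example_send_reply(struct sock *nlsk, struct sk_buff *request)
{
	u32 dst_portid = NETLINK_CB(request).portid;	/* requester's portid */
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(sizeof(u32), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* MY_MSG_TYPE is a placeholder; real types are >= NLMSG_MIN_TYPE */
	nlh = nlmsg_put(skb, 0, nlmsg_hdr(request)->nlmsg_seq,
			MY_MSG_TYPE, sizeof(u32), 0);
	if (!nlh) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}
	*(u32 *)nlmsg_data(nlh) = 42;	/* arbitrary example payload */
	nlmsg_end(skb, nlh);

	/* netlink_unicast() consumes the skb on both success and failure */
	return netlink_unicast(nlsk, skb, dst_portid, MSG_DONTWAIT);
}
#endif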
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
int res = 0;
struct listeners *listeners;
BUG_ON(!netlink_is_kernel(sk));
rcu_read_lock();
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
bool netlink_strict_get_check(struct sk_buff *skb)
{
return nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
}
EXPORT_SYMBOL_GPL(netlink_strict_get_check);
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
netlink_skb_set_owner_r(skb, sk);
__netlink_sendskb(sk, skb);
return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
}
return -1;
}
struct netlink_broadcast_data {
struct sock *exclude_sk;
struct net *net;
u32 portid;
u32 group;
int failure;
int delivery_failure;
int congested;
int delivered;
gfp_t allocation;
struct sk_buff *skb, *skb2;
int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
void *tx_data;
};
static void do_one_broadcast(struct sock *sk,
struct netlink_broadcast_data *p)
{
struct netlink_sock *nlk = nlk_sk(sk);
int val;
if (p->exclude_sk == sk)
return;
if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
!test_bit(p->group - 1, nlk->groups))
return;
if (!net_eq(sock_net(sk), p->net)) {
if (!nlk_test_bit(LISTEN_ALL_NSID, sk))
return;
if (!peernet_has_id(sock_net(sk), p->net))
return;
if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
CAP_NET_BROADCAST))
return;
}
if (p->failure) {
netlink_overrun(sk);
return;
}
sock_hold(sk);
if (p->skb2 == NULL) {
if (skb_shared(p->skb)) {
p->skb2 = skb_clone(p->skb, p->allocation);
} else {
p->skb2 = skb_get(p->skb);
/*
* skb ownership may have been set when
* delivered to a previous socket.
*/
skb_orphan(p->skb2);
}
}
if (p->skb2 == NULL) {
netlink_overrun(sk);
/* Clone failed. Notify ALL listeners. */
p->failure = 1;
if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
p->delivery_failure = 1;
goto out;
}
if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
goto out;
}
if (sk_filter(sk, p->skb2)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
goto out;
}
NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
NETLINK_CB(p->skb2).nsid_is_set = true;
val = netlink_broadcast_deliver(sk, p->skb2);
if (val < 0) {
netlink_overrun(sk);
if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
p->delivery_failure = 1;
} else {
p->congested |= val;
p->delivered = 1;
p->skb2 = NULL;
}
out:
sock_put(sk);
}
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
u32 portid,
u32 group, gfp_t allocation,
int (*filter)(struct sock *dsk,
struct sk_buff *skb, void *data),
void *filter_data)
{
struct net *net = sock_net(ssk);
struct netlink_broadcast_data info;
struct sock *sk;
skb = netlink_trim(skb, allocation);
info.exclude_sk = ssk;
info.net = net;
info.portid = portid;
info.group = group;
info.failure = 0;
info.delivery_failure = 0;
info.congested = 0;
info.delivered = 0;
info.allocation = allocation;
info.skb = skb;
info.skb2 = NULL;
info.tx_filter = filter;
info.tx_data = filter_data;
/* While we sleep in clone, do not allow the socket list to change */
netlink_lock_table();
sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
do_one_broadcast(sk, &info);
consume_skb(skb);
netlink_unlock_table();
if (info.delivery_failure) {
kfree_skb(info.skb2);
return -ENOBUFS;
}
consume_skb(info.skb2);
if (info.delivered) {
if (info.congested && gfpflags_allow_blocking(allocation))
yield();
return 0;
}
return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
u32 group, gfp_t allocation)
{
return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
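/*
 * Illustrative sketch, not part of af_netlink.c: broadcasting an (empty)
 * notification to a multicast group.  MY_GRP and MY_MSG_TYPE are
 * hypothetical placeholders; netlink_has_listeners() and
 * netlink_broadcast() are the real helpers defined above.
 */
#if 0
static void example_notify_group(struct sock *nlsk)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	/* cheap early exit when nobody has subscribed to the group */
	if (!netlink_has_listeners(nlsk, MY_GRP))
		return;

	skb = nlmsg_new(0, GFP_KERNEL);
	if (!skb)
		return;

	nlh = nlmsg_put(skb, 0, 0, MY_MSG_TYPE, 0, 0);
	if (!nlh) {
		nlmsg_free(skb);
		return;
	}
	nlmsg_end(skb, nlh);

	/* portid 0: exclude no listener; the skb is consumed either way */
	netlink_broadcast(nlsk, skb, 0, MY_GRP, GFP_KERNEL);
}
#endif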
struct netlink_set_err_data {
struct sock *exclude_sk;
u32 portid;
u32 group;
int code;
};
static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
struct netlink_sock *nlk = nlk_sk(sk);
int ret = 0;
if (sk == p->exclude_sk)
goto out;
if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
goto out;
if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
!test_bit(p->group - 1, nlk->groups))
goto out;
if (p->code == ENOBUFS && nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
ret = 1;
goto out;
}
sk->sk_err = p->code;
sk_error_report(sk);
out:
return ret;
}
/**
* netlink_set_err - report error to broadcast listeners
* @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
* @portid: the PORTID of a process that we want to skip (if any)
* @group: the broadcast group that will notice the error
* @code: error code, must be negative (as usual in kernelspace)
*
* This function returns the number of broadcast listeners that have set the
* NETLINK_NO_ENOBUFS socket option.
*/
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
struct netlink_set_err_data info;
unsigned long flags;
struct sock *sk;
int ret = 0;
info.exclude_sk = ssk;
info.portid = portid;
info.group = group;
/* sk->sk_err wants a positive error value */
info.code = -code;
read_lock_irqsave(&nl_table_lock, flags);
sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
ret += do_one_set_err(sk, &info);
read_unlock_irqrestore(&nl_table_lock, flags);
return ret;
}
EXPORT_SYMBOL(netlink_set_err);
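/*
 * Illustrative sketch, not part of af_netlink.c: reporting a buffer overrun
 * to every member of a multicast group, following the kernel-doc above
 * (the code argument is passed as a negative errno).  MY_GRP is a
 * hypothetical placeholder.
 */
#if 0
static void example_report_overrun(struct sock *nlsk)
{
	/* listeners that did not set NETLINK_NO_ENOBUFS see sk_err = ENOBUFS */
	netlink_set_err(nlsk, 0, MY_GRP, -ENOBUFS);
}
#endif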
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
unsigned int group,
int is_new)
{
int old, new = !!is_new, subscriptions;
old = test_bit(group - 1, nlk->groups);
subscriptions = nlk->subscriptions - old + new;
__assign_bit(group - 1, nlk->groups, new);
netlink_update_subscriptions(&nlk->sk, subscriptions);
netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
unsigned int val = 0;
int nr = -1;
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
if (optlen >= sizeof(int) &&
copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
switch (optname) {
case NETLINK_PKTINFO:
nr = NETLINK_F_RECV_PKTINFO;
break;
case NETLINK_ADD_MEMBERSHIP:
case NETLINK_DROP_MEMBERSHIP: {
int err;
if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
return -EPERM;
err = netlink_realloc_groups(sk);
if (err)
return err;
if (!val || val - 1 >= nlk->ngroups)
return -EINVAL;
if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
err = nlk->netlink_bind(sock_net(sk), val);
if (err)
return err;
}
netlink_table_grab();
netlink_update_socket_mc(nlk, val,
optname == NETLINK_ADD_MEMBERSHIP);
netlink_table_ungrab();
if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
nlk->netlink_unbind(sock_net(sk), val);
break;
}
case NETLINK_BROADCAST_ERROR:
nr = NETLINK_F_BROADCAST_SEND_ERROR;
break;
case NETLINK_NO_ENOBUFS:
assign_bit(NETLINK_F_RECV_NO_ENOBUFS, &nlk->flags, val);
if (val) {
clear_bit(NETLINK_S_CONGESTED, &nlk->state);
wake_up_interruptible(&nlk->wait);
}
break;
case NETLINK_LISTEN_ALL_NSID:
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
return -EPERM;
nr = NETLINK_F_LISTEN_ALL_NSID;
break;
case NETLINK_CAP_ACK:
nr = NETLINK_F_CAP_ACK;
break;
case NETLINK_EXT_ACK:
nr = NETLINK_F_EXT_ACK;
break;
case NETLINK_GET_STRICT_CHK:
nr = NETLINK_F_STRICT_CHK;
break;
default:
return -ENOPROTOOPT;
}
if (nr >= 0)
assign_bit(nr, &nlk->flags, val);
return 0;
}
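/*
 * Illustrative sketch, not part of af_netlink.c: the userspace side of the
 * NETLINK_ADD_MEMBERSHIP option handled above.  NETLINK_ROUTE/RTNLGRP_LINK
 * are only example protocol/group choices; any other pairing works the same
 * way.
 */
#if 0
/* Userspace code; it would normally live in its own .c file. */
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int example_join_group(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	unsigned int group = RTNLGRP_LINK;

	if (fd < 0)
		return -1;
	/* lands in netlink_setsockopt() -> netlink_update_socket_mc() */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &group, sizeof(group)) < 0)
		return -1;
	return fd;
}
#endif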
static int netlink_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
unsigned int flag;
int len, val;
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case NETLINK_PKTINFO:
flag = NETLINK_F_RECV_PKTINFO;
break;
case NETLINK_BROADCAST_ERROR:
flag = NETLINK_F_BROADCAST_SEND_ERROR;
break;
case NETLINK_NO_ENOBUFS:
flag = NETLINK_F_RECV_NO_ENOBUFS;
break;
case NETLINK_LIST_MEMBERSHIPS: {
int pos, idx, shift, err = 0;
netlink_lock_table();
for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
if (len - pos < sizeof(u32))
break;
idx = pos / sizeof(unsigned long);
shift = (pos % sizeof(unsigned long)) * 8;
if (put_user((u32)(nlk->groups[idx] >> shift),
(u32 __user *)(optval + pos))) {
err = -EFAULT;
break;
}
}
if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen))
err = -EFAULT;
netlink_unlock_table();
return err;
}
case NETLINK_CAP_ACK:
flag = NETLINK_F_CAP_ACK;
break;
case NETLINK_EXT_ACK:
flag = NETLINK_F_EXT_ACK;
break;
case NETLINK_GET_STRICT_CHK:
flag = NETLINK_F_STRICT_CHK;
break;
default:
return -ENOPROTOOPT;
}
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = test_bit(flag, &nlk->flags);
if (put_user(len, optlen) ||
copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
struct nl_pktinfo info;
info.group = NETLINK_CB(skb).dst_group;
put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb)
{
if (!NETLINK_CB(skb).nsid_is_set)
return;
put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
&NETLINK_CB(skb).nsid);
}
static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
u32 dst_portid;
u32 dst_group;
struct sk_buff *skb;
int err;
struct scm_cookie scm;
u32 netlink_skb_flags = 0;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (len == 0) {
pr_warn_once("Zero length message leads to an empty skb\n");
return -ENODATA;
}
err = scm_send(sock, msg, &scm, true);
if (err < 0)
return err;
if (msg->msg_namelen) {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_nl))
goto out;
if (addr->nl_family != AF_NETLINK)
goto out;
dst_portid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
if ((dst_group || dst_portid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
goto out;
netlink_skb_flags |= NETLINK_SKB_DST;
} else {
/* Paired with WRITE_ONCE() in netlink_connect() */
dst_portid = READ_ONCE(nlk->dst_portid);
dst_group = READ_ONCE(nlk->dst_group);
}
/* Paired with WRITE_ONCE() in netlink_insert() */
if (!READ_ONCE(nlk->bound)) {
err = netlink_autobind(sock);
if (err)
goto out;
} else {
/* Ensure nlk is hashed and visible. */
smp_rmb();
}
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
skb = netlink_alloc_large_skb(len, dst_group);
if (skb == NULL)
goto out;
NETLINK_CB(skb).portid = nlk->portid;
NETLINK_CB(skb).dst_group = dst_group;
NETLINK_CB(skb).creds = scm.creds;
NETLINK_CB(skb).flags = netlink_skb_flags;
err = -EFAULT;
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
kfree_skb(skb);
goto out;
}
err = security_netlink_send(sk, skb);
if (err) {
kfree_skb(skb);
goto out;
}
if (dst_group) {
refcount_inc(&skb->users);
netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
}
err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);
out:
scm_destroy(&scm);
return err;
}
static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
size_t copied, max_recvmsg_len;
struct sk_buff *skb, *data_skb;
int err, ret;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
copied = 0;
skb = skb_recv_datagram(sk, flags, &err);
if (skb == NULL)
goto out;
data_skb = skb;
#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
if (unlikely(skb_shinfo(skb)->frag_list)) {
/*
* If this skb has a frag_list, it means that we
* will have to use the frag_list skb's data for compat tasks
* and the regular skb's data for normal (non-compat) tasks.
*
* If we need to send the compat skb, assign it to the
* 'data_skb' variable so that it will be used below for data
* copying. We keep 'skb' for everything else, including
* freeing both later.
*/
if (flags & MSG_CMSG_COMPAT)
data_skb = skb_shinfo(skb)->frag_list;
}
#endif
/* Record the max length of recvmsg() calls for future allocations */
max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
max_recvmsg_len = min_t(size_t, max_recvmsg_len,
SKB_WITH_OVERHEAD(32768));
WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
copied = data_skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
addr->nl_family = AF_NETLINK;
addr->nl_pad = 0;
addr->nl_pid = NETLINK_CB(skb).portid;
addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
msg->msg_namelen = sizeof(*addr);
}
if (nlk_test_bit(RECV_PKTINFO, sk))
netlink_cmsg_recv_pktinfo(msg, skb);
if (nlk_test_bit(LISTEN_ALL_NSID, sk))
netlink_cmsg_listen_all_nsid(sk, msg, skb);
memset(&scm, 0, sizeof(scm));
scm.creds = *NETLINK_CREDS(skb);
if (flags & MSG_TRUNC)
copied = data_skb->len;
skb_free_datagram(sk, skb);
if (READ_ONCE(nlk->cb_running) &&
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
sk->sk_err = -ret;
sk_error_report(sk);
}
}
scm_recv(sock, msg, &scm, flags);
out:
netlink_rcv_wake(sk);
return err ? : copied;
}
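/*
 * Illustrative sketch, not part of af_netlink.c: a minimal userspace receive
 * loop for the recvmsg() path above, using the standard NLMSG_OK/NLMSG_NEXT
 * iteration from <linux/netlink.h>.  The 8 KiB buffer size is an arbitrary
 * example choice.
 */
#if 0
/* Userspace code; it would normally live in its own .c file. */
#include <sys/socket.h>
#include <linux/netlink.h>

static void example_recv_loop(int fd)
{
	char buf[8192];

	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh;

		if (len <= 0)
			break;
		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE ||
			    nlh->nlmsg_type == NLMSG_ERROR)
				return;
			/* process NLMSG_DATA(nlh) here */
		}
	}
}
#endif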
static void netlink_data_ready(struct sock *sk)
{
BUG();
}
/*
* We export these functions to other modules. They provide complete
* non-blocking support for kernel message queueing.
*/
struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
struct netlink_kernel_cfg *cfg)
{
struct socket *sock;
struct sock *sk;
struct netlink_sock *nlk;
struct listeners *listeners = NULL;
struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
unsigned int groups;
BUG_ON(!nl_table);
if (unit < 0 || unit >= MAX_LINKS)
return NULL;
if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
return NULL;
if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
goto out_sock_release_nosk;
sk = sock->sk;
if (!cfg || cfg->groups < 32)
groups = 32;
else
groups = cfg->groups;
listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
goto out_sock_release;
sk->sk_data_ready = netlink_data_ready;
if (cfg && cfg->input)
nlk_sk(sk)->netlink_rcv = cfg->input;
if (netlink_insert(sk, 0))
goto out_sock_release;
nlk = nlk_sk(sk);
set_bit(NETLINK_F_KERNEL_SOCKET, &nlk->flags);
netlink_table_grab();
if (!nl_table[unit].registered) {
nl_table[unit].groups = groups;
rcu_assign_pointer(nl_table[unit].listeners, listeners);
nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
if (cfg) {
nl_table[unit].bind = cfg->bind;
nl_table[unit].unbind = cfg->unbind;
nl_table[unit].release = cfg->release;
nl_table[unit].flags = cfg->flags;
}
nl_table[unit].registered = 1;
} else {
kfree(listeners);
nl_table[unit].registered++;
}
netlink_table_ungrab();
return sk;
out_sock_release:
kfree(listeners);
netlink_kernel_release(sk);
return NULL;
out_sock_release_nosk:
sock_release(sock);
return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);
void
netlink_kernel_release(struct sock *sk)
{
if (sk == NULL || sk->sk_socket == NULL)
return;
sock_release(sk->sk_socket);
}
EXPORT_SYMBOL(netlink_kernel_release);
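/*
 * Illustrative sketch, not part of af_netlink.c: creating and releasing a
 * kernel-side netlink socket with the helpers above.  NETLINK_EXAMPLE is a
 * hypothetical unit number (real users define their own NETLINK_* id) and
 * example_input() is a stub.
 */
#if 0
static struct sock *example_nlsk;

static void example_input(struct sk_buff *skb)
{
	/* called from netlink_unicast_kernel() for each message sent to us */
}

static int __init example_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,		/* minimum; see __netlink_kernel_create() */
		.input	= example_input,
	};

	example_nlsk = netlink_kernel_create(&init_net, NETLINK_EXAMPLE, &cfg);
	return example_nlsk ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	netlink_kernel_release(example_nlsk);
}
#endif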
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
struct listeners *new, *old;
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
if (groups < 32)
groups = 32;
if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
if (!new)
return -ENOMEM;
old = nl_deref_protected(tbl->listeners);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);
kfree_rcu(old, rcu);
}
tbl->groups = groups;
return 0;
}
/**
* netlink_change_ngroups - change number of multicast groups
*
* This changes the number of multicast groups that are available
* on a certain netlink family. Note that it is not possible to
* change the number of groups to below 32. Also note that it does
* not implicitly call netlink_clear_multicast_users() when the
* number of groups is reduced.
*
* @sk: The kernel netlink socket, as returned by netlink_kernel_create().
* @groups: The new number of groups.
*/
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
int err;
netlink_table_grab();
err = __netlink_change_ngroups(sk, groups);
netlink_table_ungrab();
return err;
}
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
struct sock *sk;
struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
sk_for_each_bound(sk, &tbl->mc_list)
netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
struct nlmsghdr *nlh;
int size = nlmsg_msg_size(len);
nlh = skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = portid;
nlh->nlmsg_seq = seq;
if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
/*
* This looks a bit ugly; it would be better to create a kernel thread.
*/
static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
struct netlink_callback *cb,
struct netlink_ext_ack *extack)
{
struct nlmsghdr *nlh;
nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno),
NLM_F_MULTI | cb->answer_flags);
if (WARN_ON(!nlh))
return -ENOBUFS;
nl_dump_check_consistent(cb, nlh);
memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
if (extack->_msg && test_bit(NETLINK_F_EXT_ACK, &nlk->flags)) {
nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg))
nlmsg_end(skb, nlh);
}
return 0;
}
static int netlink_dump(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ext_ack extack = {};
struct netlink_callback *cb;
struct sk_buff *skb = NULL;
size_t max_recvmsg_len;
struct module *module;
int err = -ENOBUFS;
int alloc_min_size;
int alloc_size;
mutex_lock(nlk->cb_mutex);
if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
}
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto errout_skb;
/* NLMSG_GOODSIZE is small to avoid high-order allocations being
* required, but it makes sense to _attempt_ a 16K allocation
* to reduce the number of system calls on dump operations, if the
* user ever provided a big enough buffer.
*/
cb = &nlk->cb;
alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
if (alloc_min_size < max_recvmsg_len) {
alloc_size = max_recvmsg_len;
skb = alloc_skb(alloc_size,
(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
__GFP_NOWARN | __GFP_NORETRY);
}
if (!skb) {
alloc_size = alloc_min_size;
skb = alloc_skb(alloc_size, GFP_KERNEL);
}
if (!skb)
goto errout_skb;
/* Trim skb to the allocated size. The user is expected to provide a
* buffer as large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped
* at netlink_recvmsg())). The dump will pack as many smaller messages
* as can fit within the allocated skb. The skb is typically allocated
* with more space than required (possibly nearly 2x the requested size,
* due to the align-to-next-power-of-2 approach). Allowing the dump to
* use the excess space makes it difficult for a user to have a
* reasonable static buffer based on the expected largest dump of a
* single netdev. The outcome is a MSG_TRUNC error.
*/
skb_reserve(skb, skb_tailroom(skb) - alloc_size);
/* Make sure malicious BPF programs cannot read uninitialized memory
* from skb->head -> skb->data
*/
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
netlink_skb_set_owner_r(skb, sk);
if (nlk->dump_done_errno > 0) {
cb->extack = &extack;
nlk->dump_done_errno = cb->dump(skb, cb);
cb->extack = NULL;
}
if (nlk->dump_done_errno > 0 ||
skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
mutex_unlock(nlk->cb_mutex);
if (sk_filter(sk, skb))
kfree_skb(skb);
else
__netlink_sendskb(sk, skb);
return 0;
}
if (netlink_dump_done(nlk, skb, cb, &extack))
goto errout_skb;
#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
/* frag_list skb's data is used for compat tasks
* and the regular skb's data for normal (non-compat) tasks.
* See netlink_recvmsg().
*/
if (unlikely(skb_shinfo(skb)->frag_list)) {
if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack))
goto errout_skb;
}
#endif
if (sk_filter(sk, skb))
kfree_skb(skb);
else
__netlink_sendskb(sk, skb);
if (cb->done)
cb->done(cb);
WRITE_ONCE(nlk->cb_running, false);
module = cb->module;
skb = cb->skb;
mutex_unlock(nlk->cb_mutex);
module_put(module);
consume_skb(skb);
return 0;
errout_skb:
mutex_unlock(nlk->cb_mutex);
kfree_skb(skb);
return err;
}
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control)
{
struct netlink_callback *cb;
struct netlink_sock *nlk;
struct sock *sk;
int ret;
refcount_inc(&skb->users);
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
if (sk == NULL) {
ret = -ECONNREFUSED;
goto error_free;
}
nlk = nlk_sk(sk);
mutex_lock(nlk->cb_mutex);
/* A dump is in progress... */
if (nlk->cb_running) {
ret = -EBUSY;
goto error_unlock;
}
/* take a reference on the module that cb->dump belongs to */
if (!try_module_get(control->module)) {
ret = -EPROTONOSUPPORT;
goto error_unlock;
}
cb = &nlk->cb;
memset(cb, 0, sizeof(*cb));
cb->dump = control->dump;
cb->done = control->done;
cb->nlh = nlh;
cb->data = control->data;
cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
cb->skb = skb;
cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
if (control->start) {
cb->extack = control->extack;
ret = control->start(cb);
cb->extack = NULL;
if (ret)
goto error_put;
}
WRITE_ONCE(nlk->cb_running, true);
nlk->dump_done_errno = INT_MAX;
mutex_unlock(nlk->cb_mutex);
ret = netlink_dump(sk);
sock_put(sk);
if (ret)
return ret;
/* We successfully started a dump; by returning -EINTR we signal
* that no ACK should be sent even if one was requested.
*/
return -EINTR;
error_put:
module_put(control->module);
error_unlock:
sock_put(sk);
mutex_unlock(nlk->cb_mutex);
error_free:
kfree_skb(skb);
return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
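/*
 * Illustrative sketch, not part of af_netlink.c: starting a dump from a
 * request handler through the netlink_dump_start() wrapper, which fills in
 * control->module and calls __netlink_dump_start() above.  example_dump()
 * and nlsk (the kernel-side socket for this protocol) are hypothetical.
 */
#if 0
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* append as many messages as fit; return >0 to be called again,
	 * <= 0 to finish, after which NLMSG_DONE is queued on our behalf
	 */
	return 0;
}

static int example_handle_getdump(struct sock *nlsk, struct sk_buff *skb,
				  struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.dump = example_dump,
	};

	/* returns -EINTR on success so that no ACK is sent for the request */
	return netlink_dump_start(nlsk, skb, nlh, &c);
}
#endif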
static size_t
netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
const struct netlink_ext_ack *extack)
{
size_t tlvlen;
if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags))
return 0;
tlvlen = 0;
if (extack->_msg)
tlvlen += nla_total_size(strlen(extack->_msg) + 1);
if (extack->cookie_len)
tlvlen += nla_total_size(extack->cookie_len);
/* The following attributes are only reported for errors (not warnings) */
if (!err)
return tlvlen;
if (extack->bad_attr)
tlvlen += nla_total_size(sizeof(u32));
if (extack->policy)
tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy);
if (extack->miss_type)
tlvlen += nla_total_size(sizeof(u32));
if (extack->miss_nest)
tlvlen += nla_total_size(sizeof(u32));
return tlvlen;
}
static void
netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb,
struct nlmsghdr *nlh, int err,
const struct netlink_ext_ack *extack)
{
if (extack->_msg)
WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg));
if (extack->cookie_len)
WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
extack->cookie_len, extack->cookie));
if (!err)
return;
if (extack->bad_attr &&
!WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
(u8 *)extack->bad_attr >= in_skb->data + in_skb->len))
WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
(u8 *)extack->bad_attr - (u8 *)nlh));
if (extack->policy)
netlink_policy_dump_write_attr(skb, extack->policy,
NLMSGERR_ATTR_POLICY);
if (extack->miss_type)
WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE,
extack->miss_type));
if (extack->miss_nest &&
!WARN_ON((u8 *)extack->miss_nest < in_skb->data ||
(u8 *)extack->miss_nest > in_skb->data + in_skb->len))
WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST,
(u8 *)extack->miss_nest - (u8 *)nlh));
}
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
const struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
struct nlmsghdr *rep;
struct nlmsgerr *errmsg;
size_t payload = sizeof(*errmsg);
struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
unsigned int flags = 0;
size_t tlvlen;
/* Error messages get the original request appended, unless the user
* requests to cap the error message, and get extra error data if
* requested.
*/
if (err && !test_bit(NETLINK_F_CAP_ACK, &nlk->flags))
payload += nlmsg_len(nlh);
else
flags |= NLM_F_CAPPED;
tlvlen = netlink_ack_tlv_len(nlk, err, extack);
if (tlvlen)
flags |= NLM_F_ACK_TLVS;
skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
if (!skb)
goto err_skb;
rep = nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
NLMSG_ERROR, sizeof(*errmsg), flags);
if (!rep)
goto err_bad_put;
errmsg = nlmsg_data(rep);
errmsg->error = err;
errmsg->msg = *nlh;
if (!(flags & NLM_F_CAPPED)) {
if (!nlmsg_append(skb, nlmsg_len(nlh)))
goto err_bad_put;
memcpy(nlmsg_data(&errmsg->msg), nlmsg_data(nlh),
nlmsg_len(nlh));
}
if (tlvlen)
netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack);
nlmsg_end(skb, rep);
nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
return;
err_bad_put:
nlmsg_free(skb);
err_skb:
NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
sk_error_report(NETLINK_CB(in_skb).sk);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
struct nlmsghdr *,
struct netlink_ext_ack *))
{
struct netlink_ext_ack extack;
struct nlmsghdr *nlh;
int err;
while (skb->len >= nlmsg_total_size(0)) {
int msglen;
memset(&extack, 0, sizeof(extack));
nlh = nlmsg_hdr(skb);
err = 0;
if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
return 0;
/* Only requests are handled by the kernel */
if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
goto ack;
/* Skip control messages */
if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
goto ack;
err = cb(skb, nlh, &extack);
if (err == -EINTR)
goto skip;
ack:
if (nlh->nlmsg_flags & NLM_F_ACK || err)
netlink_ack(skb, nlh, err, &extack);
skip:
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
}
return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
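/*
 * Illustrative sketch, not part of af_netlink.c: the usual shape of a kernel
 * socket's input callback, letting netlink_rcv_skb() walk the messages and
 * take care of ACK generation.  example_rcv_msg() is a hypothetical
 * per-message handler.
 */
#if 0
static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	/* dispatch on nlh->nlmsg_type; a negative return value ends up in
	 * the NLMSG_ERROR message built by netlink_ack()
	 */
	return 0;
}

static void example_netlink_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, example_rcv_msg);
}
#endif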
/**
* nlmsg_notify - send a notification netlink message
* @sk: netlink socket to use
* @skb: notification message
* @portid: destination netlink portid for reports or 0
* @group: destination multicast group or 0
* @report: 1 to report back, 0 to disable
* @flags: allocation flags
*/
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
unsigned int group, int report, gfp_t flags)
{
int err = 0;
if (group) {
int exclude_portid = 0;
if (report) {
refcount_inc(&skb->users);
exclude_portid = portid;
}
/* errors are reported via the destination sk->sk_err, but delivery
* errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
if (err == -ESRCH)
err = 0;
}
if (report) {
int err2;
err2 = nlmsg_unicast(sk, skb, portid);
if (!err)
err = err2;
}
return err;
}
EXPORT_SYMBOL(nlmsg_notify);
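/*
 * Illustrative sketch, not part of af_netlink.c: the common "notify the
 * group and optionally echo to the requester" pattern served by
 * nlmsg_notify() above.  MY_GRP and the pre-built skb are hypothetical.
 */
#if 0
static int example_notify_change(struct sock *nlsk, struct sk_buff *skb,
				 u32 requester_portid, bool echo)
{
	/* when echo is set, the requester also receives a unicast copy */
	return nlmsg_notify(nlsk, skb, requester_portid, MY_GRP,
			    echo, GFP_KERNEL);
}
#endif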
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
struct seq_net_private p;
struct rhashtable_iter hti;
int link;
};
static void netlink_walk_start(struct nl_seq_iter *iter)
{
rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
rhashtable_walk_start(&iter->hti);
}
static void netlink_walk_stop(struct nl_seq_iter *iter)
{
rhashtable_walk_stop(&iter->hti);
rhashtable_walk_exit(&iter->hti);
}
static void *__netlink_seq_next(struct seq_file *seq)
{
struct nl_seq_iter *iter = seq->private;
struct netlink_sock *nlk;
do {
for (;;) {
nlk = rhashtable_walk_next(&iter->hti);
if (IS_ERR(nlk)) {
if (PTR_ERR(nlk) == -EAGAIN)
continue;
return nlk;
}
if (nlk)
break;
netlink_walk_stop(iter);
if (++iter->link >= MAX_LINKS)
return NULL;
netlink_walk_start(iter);
}
} while (sock_net(&nlk->sk) != seq_file_net(seq));
return nlk;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
__acquires(RCU)
{
struct nl_seq_iter *iter = seq->private;
void *obj = SEQ_START_TOKEN;
loff_t pos;
iter->link = 0;
netlink_walk_start(iter);
for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
obj = __netlink_seq_next(seq);
return obj;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return __netlink_seq_next(seq);
}
static void netlink_native_seq_stop(struct seq_file *seq, void *v)
{
struct nl_seq_iter *iter = seq->private;
if (iter->link >= MAX_LINKS)
return;
netlink_walk_stop(iter);
}
static int netlink_native_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"sk Eth Pid Groups "
"Rmem Wmem Dump Locks Drops Inode\n");
} else {
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n",
s,
s->sk_protocol,
nlk->portid,
nlk->groups ? (u32)nlk->groups[0] : 0,
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
READ_ONCE(nlk->cb_running),
refcount_read(&s->sk_refcnt),
atomic_read(&s->sk_drops),
sock_i_ino(s)
);
}
return 0;
}
#ifdef CONFIG_BPF_SYSCALL
struct bpf_iter__netlink {
__bpf_md_ptr(struct bpf_iter_meta *, meta);
__bpf_md_ptr(struct netlink_sock *, sk);
};
DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk)
static int netlink_prog_seq_show(struct bpf_prog *prog,
struct bpf_iter_meta *meta,
void *v)
{
struct bpf_iter__netlink ctx;
meta->seq_num--; /* skip SEQ_START_TOKEN */
ctx.meta = meta;
ctx.sk = nlk_sk((struct sock *)v);
return bpf_iter_run_prog(prog, &ctx);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
struct bpf_iter_meta meta;
struct bpf_prog *prog;
meta.seq = seq;
prog = bpf_iter_get_info(&meta, false);
if (!prog)
return netlink_native_seq_show(seq, v);
if (v != SEQ_START_TOKEN)
return netlink_prog_seq_show(prog, &meta, v);
return 0;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
struct bpf_iter_meta meta;
struct bpf_prog *prog;
if (!v) {
meta.seq = seq;
prog = bpf_iter_get_info(&meta, true);
if (prog)
(void)netlink_prog_seq_show(prog, &meta, v);
}
netlink_native_seq_stop(seq, v);
}
#else
static int netlink_seq_show(struct seq_file *seq, void *v)
{
return netlink_native_seq_show(seq, v);
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
netlink_native_seq_stop(seq, v);
}
#endif
static const struct seq_operations netlink_seq_ops = {
.start = netlink_seq_start,
.next = netlink_seq_next,
.stop = netlink_seq_stop,
.show = netlink_seq_show,
};
#endif
int netlink_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);
int netlink_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
static const struct proto_ops netlink_ops = {
.family = PF_NETLINK,
.owner = THIS_MODULE,
.release = netlink_release,
.bind = netlink_bind,
.connect = netlink_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = netlink_getname,
.poll = datagram_poll,
.ioctl = netlink_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = netlink_setsockopt,
.getsockopt = netlink_getsockopt,
.sendmsg = netlink_sendmsg,
.recvmsg = netlink_recvmsg,
.mmap = sock_no_mmap,
};
static const struct net_proto_family netlink_family_ops = {
.family = PF_NETLINK,
.create = netlink_create,
.owner = THIS_MODULE, /* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
if (!proc_create_net("netlink", 0, net->proc_net, &netlink_seq_ops,
sizeof(struct nl_seq_iter)))
return -ENOMEM;
#endif
return 0;
}
static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
remove_proc_entry("netlink", net->proc_net);
#endif
}
static void __init netlink_add_usersock_entry(void)
{
struct listeners *listeners;
int groups = 32;
listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
netlink_table_grab();
nl_table[NETLINK_USERSOCK].groups = groups;
rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
netlink_table_ungrab();
}
static struct pernet_operations __net_initdata netlink_net_ops = {
.init = netlink_net_init,
.exit = netlink_net_exit,
};
static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{
const struct netlink_sock *nlk = data;
struct netlink_compare_arg arg;
netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
}
static const struct rhashtable_params netlink_rhashtable_params = {
.head_offset = offsetof(struct netlink_sock, node),
.key_len = netlink_compare_arg_len,
.obj_hashfn = netlink_hash,
.obj_cmpfn = netlink_compare,
.automatic_shrinking = true,
};
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
BTF_ID_LIST(btf_netlink_sock_id)
BTF_ID(struct, netlink_sock)
static const struct bpf_iter_seq_info netlink_seq_info = {
.seq_ops = &netlink_seq_ops,
.init_seq_private = bpf_iter_init_seq_net,
.fini_seq_private = bpf_iter_fini_seq_net,
.seq_priv_size = sizeof(struct nl_seq_iter),
};
static struct bpf_iter_reg netlink_reg_info = {
.target = "netlink",
.ctx_arg_info_size = 1,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__netlink, sk),
PTR_TO_BTF_ID_OR_NULL },
},
.seq_info = &netlink_seq_info,
};
static int __init bpf_iter_register(void)
{
netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id;
return bpf_iter_reg_target(&netlink_reg_info);
}
#endif
static int __init netlink_proto_init(void)
{
int i;
int err = proto_register(&netlink_proto, 0);
if (err != 0)
goto out;
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
err = bpf_iter_register();
if (err)
goto out;
#endif
BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));
nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
if (!nl_table)
goto panic;
for (i = 0; i < MAX_LINKS; i++) {
if (rhashtable_init(&nl_table[i].hash,
&netlink_rhashtable_params) < 0) {
while (--i > 0)
rhashtable_destroy(&nl_table[i].hash);
kfree(nl_table);
goto panic;
}
}
netlink_add_usersock_entry();
sock_register(&netlink_family_ops);
register_pernet_subsys(&netlink_net_ops);
register_pernet_subsys(&netlink_tap_net_ops);
/* The netlink device handler may be needed early. */
rtnetlink_init();
out:
return err;
panic:
panic("netlink_init: Cannot allocate nl_table\n");
}
core_initcall(netlink_proto_init);
| linux-master | net/netlink/af_netlink.c |
/* Upcall routine, designed to work as a key type and working through
* /sbin/request-key to contact userspace when handling DNS queries.
*
* See Documentation/networking/dns_resolver.rst
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov ([email protected])
* Steve French ([email protected])
* Wang Lei ([email protected])
* David Howells ([email protected])
*
* The upcall wrapper used to make an arbitrary DNS query.
*
* This function requires the appropriate userspace tool dns.upcall to be
* installed and something like the following lines should be added to the
* /etc/request-key.conf file:
*
* create dns_resolver * * /sbin/dns.upcall %k
*
* For example to use this module to query AFSDB RR:
*
* create dns_resolver afsdb:* * /sbin/dns.afsdb %k
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/dns_resolver.h>
#include <linux/err.h>
#include <net/net_namespace.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"
/**
* dns_query - Query the DNS
* @net: The network namespace to operate in.
* @type: Query type (or NULL for straight host->IP lookup)
* @name: Name to look up
* @namelen: Length of name
* @options: Request options (or NULL if no options)
* @_result: Where to place the returned data (or NULL)
* @_expiry: Where to store the result expiry time (or NULL)
* @invalidate: Always invalidate the key after use
*
* The data will be returned in the pointer at *_result, if provided, and the
* caller is responsible for freeing it.
*
* The description should be of the form "[<query_type>:]<domain_name>", and
* the options need to be appropriate for the query type requested. If no
* query_type is given, then the query is a straight hostname to IP address
* lookup.
*
* The DNS resolution lookup is performed by upcalling to userspace by way of
* requesting a key of type dns_resolver.
*
* Returns the size of the result on success, -ve error code otherwise.
*/
int dns_query(struct net *net,
const char *type, const char *name, size_t namelen,
const char *options, char **_result, time64_t *_expiry,
bool invalidate)
{
struct key *rkey;
struct user_key_payload *upayload;
const struct cred *saved_cred;
size_t typelen, desclen;
char *desc, *cp;
int ret, len;
kenter("%s,%*.*s,%zu,%s",
type, (int)namelen, (int)namelen, name, namelen, options);
if (!name || namelen == 0)
return -EINVAL;
/* construct the query key description as "[<type>:]<name>" */
typelen = 0;
desclen = 0;
if (type) {
typelen = strlen(type);
if (typelen < 1)
return -EINVAL;
desclen += typelen + 1;
}
if (namelen < 3 || namelen > 255)
return -EINVAL;
desclen += namelen + 1;
desc = kmalloc(desclen, GFP_KERNEL);
if (!desc)
return -ENOMEM;
cp = desc;
if (type) {
memcpy(cp, type, typelen);
cp += typelen;
*cp++ = ':';
}
memcpy(cp, name, namelen);
cp += namelen;
*cp = '\0';
if (!options)
options = "";
kdebug("call request_key(,%s,%s)", desc, options);
/* make the upcall, using special credentials to prevent the use of
* add_key() to preinstall malicious redirections
*/
saved_cred = override_creds(dns_resolver_cache);
rkey = request_key_net(&key_type_dns_resolver, desc, net, options);
revert_creds(saved_cred);
kfree(desc);
if (IS_ERR(rkey)) {
ret = PTR_ERR(rkey);
goto out;
}
down_read(&rkey->sem);
set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags);
rkey->perm |= KEY_USR_VIEW;
ret = key_validate(rkey);
if (ret < 0)
goto put;
/* If the DNS server gave an error, return that to the caller */
ret = PTR_ERR(rkey->payload.data[dns_key_error]);
if (ret)
goto put;
upayload = user_key_payload_locked(rkey);
len = upayload->datalen;
if (_result) {
ret = -ENOMEM;
*_result = kmemdup_nul(upayload->data, len, GFP_KERNEL);
if (!*_result)
goto put;
}
if (_expiry)
*_expiry = rkey->expiry;
ret = len;
put:
up_read(&rkey->sem);
if (invalidate)
key_invalidate(rkey);
key_put(rkey);
out:
kleave(" = %d", ret);
return ret;
}
EXPORT_SYMBOL(dns_query);
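/*
 * Illustrative sketch, not part of dns_query.c: a straight hostname -> IP
 * lookup through dns_query(), as described in the kernel-doc above.  The
 * hostname is only an example; the caller owns the returned string and must
 * kfree() it.
 */
#if 0
static int example_resolve(struct net *net)
{
	char *ip = NULL;
	time64_t expiry;
	int len;

	len = dns_query(net, NULL, "example.org", strlen("example.org"),
			NULL, &ip, &expiry, false);
	if (len < 0)
		return len;	/* upcall failed or the DNS lookup errored */

	pr_info("resolved to %s (expires %lld)\n", ip, (long long)expiry);
	kfree(ip);
	return 0;
}
#endif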
| linux-master | net/dns_resolver/dns_query.c |
/* Key type used to cache DNS lookups made by the kernel
*
* See Documentation/networking/dns_resolver.rst
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov ([email protected])
* Steve French ([email protected])
* Wang Lei ([email protected])
* David Howells ([email protected])
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/dns_resolver.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"
MODULE_DESCRIPTION("DNS Resolver");
MODULE_AUTHOR("Wang Lei");
MODULE_LICENSE("GPL");
unsigned int dns_resolver_debug;
module_param_named(debug, dns_resolver_debug, uint, 0644);
MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
const struct cred *dns_resolver_cache;
#define DNS_ERRORNO_OPTION "dnserror"
/*
* Preparse instantiation data for a dns_resolver key.
*
* For normal hostname lookups, the data must be a NUL-terminated string, with
* the NUL char accounted in datalen.
*
* If the data contains '#' characters, then we take the clause after each
* one to be an option of the form 'key=value'. The actual data of interest is
* the string leading up to the first '#'. For instance:
*
* "ip1,ip2,...#foo=bar"
*
* For server list requests, the data must begin with a NUL char and be
* followed by a byte indicating the version of the data format. Version 1
* looks something like (note this is packed):
*
* u8 Non-string marker (ie. 0)
* u8 Content (DNS_PAYLOAD_IS_*)
* u8 Version (e.g. 1)
* u8 Source of server list
* u8 Lookup status of server list
* u8 Number of servers
* foreach-server {
* __le16 Name length
* __le16 Priority (as per SRV record, low first)
* __le16 Weight (as per SRV record, higher first)
* __le16 Port
* u8 Source of address list
* u8 Lookup status of address list
* u8 Protocol (DNS_SERVER_PROTOCOL_*)
* u8 Number of addresses
* char[] Name (not NUL-terminated)
* foreach-address {
* u8 Family (DNS_ADDRESS_IS_*)
* union {
* u8[4] ipv4_addr
* u8[16] ipv6_addr
* }
* }
* }
*
*/
static int
dns_resolver_preparse(struct key_preparsed_payload *prep)
{
const struct dns_payload_header *bin;
struct user_key_payload *upayload;
unsigned long derrno;
int ret;
int datalen = prep->datalen, result_len = 0;
const char *data = prep->data, *end, *opt;
if (datalen <= 1 || !data)
return -EINVAL;
if (data[0] == 0) {
/* It may be a server list. */
if (datalen <= sizeof(*bin))
return -EINVAL;
bin = (const struct dns_payload_header *)data;
kenter("[%u,%u],%u", bin->content, bin->version, datalen);
if (bin->content != DNS_PAYLOAD_IS_SERVER_LIST) {
pr_warn_ratelimited(
"dns_resolver: Unsupported content type (%u)\n",
bin->content);
return -EINVAL;
}
if (bin->version != 1) {
pr_warn_ratelimited(
"dns_resolver: Unsupported server list version (%u)\n",
bin->version);
return -EINVAL;
}
result_len = datalen;
goto store_result;
}
kenter("'%*.*s',%u", datalen, datalen, data, datalen);
if (!data || data[datalen - 1] != '\0')
return -EINVAL;
datalen--;
/* deal with any options embedded in the data */
end = data + datalen;
opt = memchr(data, '#', datalen);
if (!opt) {
/* no options: the entire data is the result */
kdebug("no options");
result_len = datalen;
} else {
const char *next_opt;
result_len = opt - data;
opt++;
kdebug("options: '%s'", opt);
do {
int opt_len, opt_nlen;
const char *eq;
char optval[128];
next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
if (opt_len <= 0 || opt_len > sizeof(optval)) {
pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
opt_len);
return -EINVAL;
}
eq = memchr(opt, '=', opt_len);
if (eq) {
opt_nlen = eq - opt;
eq++;
memcpy(optval, eq, next_opt - eq);
optval[next_opt - eq] = '\0';
} else {
opt_nlen = opt_len;
optval[0] = '\0';
}
kdebug("option '%*.*s' val '%s'",
opt_nlen, opt_nlen, opt, optval);
/* see if it's an error number representing a DNS error
* that's to be recorded as the result in this key */
if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
kdebug("dns error number option");
ret = kstrtoul(optval, 10, &derrno);
if (ret < 0)
goto bad_option_value;
if (derrno < 1 || derrno > 511)
goto bad_option_value;
kdebug("dns error no. = %lu", derrno);
prep->payload.data[dns_key_error] = ERR_PTR(-derrno);
continue;
}
bad_option_value:
pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
opt_nlen, opt_nlen, opt);
return -EINVAL;
} while (opt = next_opt + 1, opt < end);
}
/* don't cache the result if we're caching an error saying there's no
* result */
if (prep->payload.data[dns_key_error]) {
kleave(" = 0 [h_error %ld]", PTR_ERR(prep->payload.data[dns_key_error]));
return 0;
}
store_result:
kdebug("store result");
prep->quotalen = result_len;
upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL);
if (!upayload) {
kleave(" = -ENOMEM");
return -ENOMEM;
}
upayload->datalen = result_len;
memcpy(upayload->data, data, result_len);
upayload->data[result_len] = '\0';
prep->payload.data[dns_key_data] = upayload;
kleave(" = 0");
return 0;
}
/*
* Clean up the preparse data
*/
static void dns_resolver_free_preparse(struct key_preparsed_payload *prep)
{
pr_devel("==>%s()\n", __func__);
kfree(prep->payload.data[dns_key_data]);
}
/*
* The description is of the form "[<type>:]<domain_name>"
*
* The domain name may be a simple name or an absolute domain name (which
* should end with a period). The domain name is case-independent.
*/
static bool dns_resolver_cmp(const struct key *key,
const struct key_match_data *match_data)
{
int slen, dlen, ret = 0;
const char *src = key->description, *dsp = match_data->raw_data;
kenter("%s,%s", src, dsp);
if (!src || !dsp)
goto no_match;
if (strcasecmp(src, dsp) == 0)
goto matched;
slen = strlen(src);
dlen = strlen(dsp);
if (slen <= 0 || dlen <= 0)
goto no_match;
if (src[slen - 1] == '.')
slen--;
if (dsp[dlen - 1] == '.')
dlen--;
if (slen != dlen || strncasecmp(src, dsp, slen) != 0)
goto no_match;
matched:
ret = 1;
no_match:
kleave(" = %d", ret);
return ret;
}
/*
* Preparse the match criterion.
*/
static int dns_resolver_match_preparse(struct key_match_data *match_data)
{
match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
match_data->cmp = dns_resolver_cmp;
return 0;
}
/*
* Describe a DNS key
*/
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
if (key_is_positive(key)) {
int err = PTR_ERR(key->payload.data[dns_key_error]);
if (err)
seq_printf(m, ": %d", err);
else
seq_printf(m, ": %u", key->datalen);
}
}
/*
* read the DNS data
* - the key's semaphore is read-locked
*/
static long dns_resolver_read(const struct key *key,
char *buffer, size_t buflen)
{
int err = PTR_ERR(key->payload.data[dns_key_error]);
if (err)
return err;
return user_read(key, buffer, buflen);
}
struct key_type key_type_dns_resolver = {
.name = "dns_resolver",
.flags = KEY_TYPE_NET_DOMAIN,
.preparse = dns_resolver_preparse,
.free_preparse = dns_resolver_free_preparse,
.instantiate = generic_key_instantiate,
.match_preparse = dns_resolver_match_preparse,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = dns_resolver_describe,
.read = dns_resolver_read,
};
static int __init init_dns_resolver(void)
{
struct cred *cred;
struct key *keyring;
int ret;
/* create an override credential set with a special thread keyring in
* which DNS requests are cached
*
* this is used to prevent malicious redirections from being installed
* with add_key().
*/
cred = prepare_kernel_cred(&init_task);
if (!cred)
return -ENOMEM;
keyring = keyring_alloc(".dns_resolver",
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
(KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ,
KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto failed_put_cred;
}
ret = register_key_type(&key_type_dns_resolver);
if (ret < 0)
goto failed_put_key;
/* instruct request_key() to use this special keyring as a cache for
* the results it looks up */
set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
cred->thread_keyring = keyring;
cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
dns_resolver_cache = cred;
kdebug("DNS resolver keyring: %d\n", key_serial(keyring));
return 0;
failed_put_key:
key_put(keyring);
failed_put_cred:
put_cred(cred);
return ret;
}
static void __exit exit_dns_resolver(void)
{
key_revoke(dns_resolver_cache->thread_keyring);
unregister_key_type(&key_type_dns_resolver);
put_cred(dns_resolver_cache);
}
module_init(init_dns_resolver)
module_exit(exit_dns_resolver)
MODULE_LICENSE("GPL");
| linux-master | net/dns_resolver/dns_key.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/mptcp.h>
#include <net/snmp.h>
#include <net/net_namespace.h>
#include "mib.h"
static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPCapableSYNRX", MPTCP_MIB_MPCAPABLEPASSIVE),
SNMP_MIB_ITEM("MPCapableSYNTX", MPTCP_MIB_MPCAPABLEACTIVE),
SNMP_MIB_ITEM("MPCapableSYNACKRX", MPTCP_MIB_MPCAPABLEACTIVEACK),
SNMP_MIB_ITEM("MPCapableACKRX", MPTCP_MIB_MPCAPABLEPASSIVEACK),
SNMP_MIB_ITEM("MPCapableFallbackACK", MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK),
SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
SNMP_MIB_ITEM("MPFallbackTokenInit", MPTCP_MIB_TOKENFALLBACKINIT),
SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
SNMP_MIB_ITEM("InfiniteMapTx", MPTCP_MIB_INFINITEMAPTX),
SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
SNMP_MIB_ITEM("DSSNoMatchTCP", MPTCP_MIB_DSSTCPMISMATCH),
SNMP_MIB_ITEM("DataCsumErr", MPTCP_MIB_DATACSUMERR),
SNMP_MIB_ITEM("OFOQueueTail", MPTCP_MIB_OFOQUEUETAIL),
SNMP_MIB_ITEM("OFOQueue", MPTCP_MIB_OFOQUEUE),
SNMP_MIB_ITEM("OFOMerge", MPTCP_MIB_OFOMERGE),
SNMP_MIB_ITEM("NoDSSInWindow", MPTCP_MIB_NODSSWINDOW),
SNMP_MIB_ITEM("DuplicateData", MPTCP_MIB_DUPDATA),
SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR),
SNMP_MIB_ITEM("AddAddrTx", MPTCP_MIB_ADDADDRTX),
SNMP_MIB_ITEM("AddAddrTxDrop", MPTCP_MIB_ADDADDRTXDROP),
SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD),
SNMP_MIB_ITEM("EchoAddTx", MPTCP_MIB_ECHOADDTX),
SNMP_MIB_ITEM("EchoAddTxDrop", MPTCP_MIB_ECHOADDTXDROP),
SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD),
SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP),
SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX),
SNMP_MIB_ITEM("MPJoinPortSynAckRx", MPTCP_MIB_JOINPORTSYNACKRX),
SNMP_MIB_ITEM("MPJoinPortAckRx", MPTCP_MIB_JOINPORTACKRX),
SNMP_MIB_ITEM("MismatchPortSynRx", MPTCP_MIB_MISMATCHPORTSYNRX),
SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX),
SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR),
SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP),
SNMP_MIB_ITEM("RmAddrTx", MPTCP_MIB_RMADDRTX),
SNMP_MIB_ITEM("RmAddrTxDrop", MPTCP_MIB_RMADDRTXDROP),
SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
SNMP_MIB_ITEM("MPFailTx", MPTCP_MIB_MPFAILTX),
SNMP_MIB_ITEM("MPFailRx", MPTCP_MIB_MPFAILRX),
SNMP_MIB_ITEM("MPFastcloseTx", MPTCP_MIB_MPFASTCLOSETX),
SNMP_MIB_ITEM("MPFastcloseRx", MPTCP_MIB_MPFASTCLOSERX),
SNMP_MIB_ITEM("MPRstTx", MPTCP_MIB_MPRSTTX),
SNMP_MIB_ITEM("MPRstRx", MPTCP_MIB_MPRSTRX),
SNMP_MIB_ITEM("RcvPruned", MPTCP_MIB_RCVPRUNED),
SNMP_MIB_ITEM("SubflowStale", MPTCP_MIB_SUBFLOWSTALE),
SNMP_MIB_ITEM("SubflowRecover", MPTCP_MIB_SUBFLOWRECOVER),
SNMP_MIB_ITEM("SndWndShared", MPTCP_MIB_SNDWNDSHARED),
SNMP_MIB_ITEM("RcvWndShared", MPTCP_MIB_RCVWNDSHARED),
SNMP_MIB_ITEM("RcvWndConflictUpdate", MPTCP_MIB_RCVWNDCONFLICTUPDATE),
SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
SNMP_MIB_SENTINEL
};
/* mptcp_mib_alloc - allocate percpu mib counters
*
* These are allocated when the first mptcp socket is created so
* we do not waste percpu memory if mptcp isn't in use.
*/
bool mptcp_mib_alloc(struct net *net)
{
struct mptcp_mib __percpu *mib = alloc_percpu(struct mptcp_mib);
if (!mib)
return false;
if (cmpxchg(&net->mib.mptcp_statistics, NULL, mib))
free_percpu(mib);
return true;
}
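/* mptcp_seq_show - dump the MPTcpExt counters
 *
 * Prints one line with all counter names followed by one line with the
 * per-netns sums, matching the format of the other /proc/net/netstat
 * sections. When the percpu counters were never allocated, every value is
 * reported as zero.
 */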
void mptcp_seq_show(struct seq_file *seq)
{
unsigned long sum[ARRAY_SIZE(mptcp_snmp_list) - 1];
struct net *net = seq->private;
int i;
seq_puts(seq, "MPTcpExt:");
for (i = 0; mptcp_snmp_list[i].name; i++)
seq_printf(seq, " %s", mptcp_snmp_list[i].name);
seq_puts(seq, "\nMPTcpExt:");
memset(sum, 0, sizeof(sum));
if (net->mib.mptcp_statistics)
snmp_get_cpu_field_batch(sum, mptcp_snmp_list,
net->mib.mptcp_statistics);
for (i = 0; mptcp_snmp_list[i].name; i++)
seq_printf(seq, " %lu", sum[i]);
seq_putc(seq, '\n');
}
| linux-master | net/mptcp/mib.c |
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
*
* Copyright (c) 2022, SUSE.
*/
#define pr_fmt(fmt) "MPTCP: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include "protocol.h"
static DEFINE_SPINLOCK(mptcp_sched_list_lock);
static LIST_HEAD(mptcp_sched_list);
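/* Default scheduler: rely on the in-kernel subflow selection helpers.
 * Pick the subflow returned by mptcp_subflow_get_retrans() for
 * reinjections and by mptcp_subflow_get_send() otherwise, then mark it
 * as scheduled.
 */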
static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk,
struct mptcp_sched_data *data)
{
struct sock *ssk;
ssk = data->reinject ? mptcp_subflow_get_retrans(msk) :
mptcp_subflow_get_send(msk);
if (!ssk)
return -EINVAL;
mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
return 0;
}
static struct mptcp_sched_ops mptcp_sched_default = {
.get_subflow = mptcp_sched_default_get_subflow,
.name = "default",
.owner = THIS_MODULE,
};
/* Must be called with rcu read lock held */
struct mptcp_sched_ops *mptcp_sched_find(const char *name)
{
struct mptcp_sched_ops *sched, *ret = NULL;
list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
if (!strcmp(sched->name, name)) {
ret = sched;
break;
}
}
return ret;
}
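/* Register a scheduler so it can be looked up by name. A get_subflow
 * callback is mandatory and duplicate names are rejected with -EEXIST.
 */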
int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
{
if (!sched->get_subflow)
return -EINVAL;
spin_lock(&mptcp_sched_list_lock);
if (mptcp_sched_find(sched->name)) {
spin_unlock(&mptcp_sched_list_lock);
return -EEXIST;
}
list_add_tail_rcu(&sched->list, &mptcp_sched_list);
spin_unlock(&mptcp_sched_list_lock);
pr_debug("%s registered", sched->name);
return 0;
}
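/* Remove a scheduler from the global list; the built-in default scheduler
 * is never unregistered.
 */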
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
{
if (sched == &mptcp_sched_default)
return;
spin_lock(&mptcp_sched_list_lock);
list_del_rcu(&sched->list);
spin_unlock(&mptcp_sched_list_lock);
}
void mptcp_sched_init(void)
{
mptcp_register_scheduler(&mptcp_sched_default);
}
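/* Attach a scheduler to a new MPTCP socket: fall back to the default one
 * when none is requested, grab a module reference and run the optional
 * init() hook.
 */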
int mptcp_init_sched(struct mptcp_sock *msk,
struct mptcp_sched_ops *sched)
{
if (!sched)
sched = &mptcp_sched_default;
if (!bpf_try_module_get(sched, sched->owner))
return -EBUSY;
msk->sched = sched;
if (msk->sched->init)
msk->sched->init(msk);
pr_debug("sched=%s", msk->sched->name);
return 0;
}
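/* Detach the scheduler from a closing MPTCP socket: run the optional
 * release() hook and drop the module reference taken by mptcp_init_sched().
 */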
void mptcp_release_sched(struct mptcp_sock *msk)
{
struct mptcp_sched_ops *sched = msk->sched;
if (!sched)
return;
msk->sched = NULL;
if (sched->release)
sched->release(msk);
bpf_module_put(sched, sched->owner);
}
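/* Annotated setter for the per-subflow 'scheduled' flag, paired with the
 * READ_ONCE() accesses done on the scheduling paths.
 */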
void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
bool scheduled)
{
WRITE_ONCE(subflow->scheduled, scheduled);
}
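/* Ask the scheduler to pick subflow(s) for the next transmission. Returns 0
 * when at least one subflow is (or already was) marked as scheduled, a
 * negative error otherwise. On fallback sockets only the first subflow is
 * eligible.
 */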
int mptcp_sched_get_send(struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow;
struct mptcp_sched_data data;
msk_owned_by_me(msk);
/* the following check is moved out of mptcp_subflow_get_send */
if (__mptcp_check_fallback(msk)) {
if (msk->first &&
__tcp_can_send(msk->first) &&
sk_stream_memory_free(msk->first)) {
mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
return 0;
}
return -EINVAL;
}
mptcp_for_each_subflow(msk, subflow) {
if (READ_ONCE(subflow->scheduled))
return 0;
}
data.reinject = false;
if (msk->sched == &mptcp_sched_default || !msk->sched)
return mptcp_sched_default_get_subflow(msk, &data);
return msk->sched->get_subflow(msk, &data);
}
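/* Same as mptcp_sched_get_send(), but for retransmissions: the scheduler
 * runs with data.reinject set and fallback sockets never retransmit at the
 * MPTCP level.
 */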
int mptcp_sched_get_retrans(struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow;
struct mptcp_sched_data data;
msk_owned_by_me(msk);
/* the following check is moved out of mptcp_subflow_get_retrans */
if (__mptcp_check_fallback(msk))
return -EINVAL;
mptcp_for_each_subflow(msk, subflow) {
if (READ_ONCE(subflow->scheduled))
return 0;
}
data.reinject = true;
if (msk->sched == &mptcp_sched_default || !msk->sched)
return mptcp_sched_default_get_subflow(msk, &data);
return msk->sched->get_subflow(msk, &data);
}
| linux-master | net/mptcp/sched.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include "protocol.h"
/* Syncookies do not work for JOIN requests.
*
* Unlike MP_CAPABLE, where the ACK cookie contains the needed MPTCP
* options to reconstruct the initial syn state, MP_JOIN does not contain
* the token to obtain the mptcp socket nor the server-generated nonce
* that was used in the cookie SYN/ACK response.
*
* Keep a small best effort state table to store the syn/synack data,
* indexed by skb hash.
*
 * An MP_JOIN SYN packet handled by syn cookies is only stored if the 32bit
* token matches a known mptcp connection that can still accept more subflows.
*
 * There is no timeout handling -- state is only reconstructed
 * when the TCP ACK passes the cookie validation check.
*/
struct join_entry {
u32 token;
u32 remote_nonce;
u32 local_nonce;
u8 join_id;
u8 local_id;
u8 backup;
u8 valid;
};
#define COOKIE_JOIN_SLOTS 1024
static struct join_entry join_entries[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp;
static spinlock_t join_entry_locks[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp;
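/* Hash the connection attempt into a slot index: use the initial SYN
 * sequence number (recovered from the ACK by subtracting one), the netns
 * and the port pair, so the SYN and the later ACK map to the same entry.
 */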
static u32 mptcp_join_entry_hash(struct sk_buff *skb, struct net *net)
{
static u32 mptcp_join_hash_secret __read_mostly;
struct tcphdr *th = tcp_hdr(skb);
u32 seq, i;
net_get_random_once(&mptcp_join_hash_secret,
sizeof(mptcp_join_hash_secret));
if (th->syn)
seq = TCP_SKB_CB(skb)->seq;
else
seq = TCP_SKB_CB(skb)->seq - 1;
i = jhash_3words(seq, net_hash_mix(net),
(__force __u32)th->source << 16 | (__force __u32)th->dest,
mptcp_join_hash_secret);
return i % ARRAY_SIZE(join_entries);
}
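/* Copy the MP_JOIN handshake data from the request socket into the given
 * slot and mark it as valid.
 */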
static void mptcp_join_store_state(struct join_entry *entry,
const struct mptcp_subflow_request_sock *subflow_req)
{
entry->token = subflow_req->token;
entry->remote_nonce = subflow_req->remote_nonce;
entry->local_nonce = subflow_req->local_nonce;
entry->backup = subflow_req->backup;
entry->join_id = subflow_req->remote_id;
entry->local_id = subflow_req->local_id;
entry->valid = 1;
}
void subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req,
struct sk_buff *skb)
{
struct net *net = read_pnet(&subflow_req->sk.req.ireq_net);
u32 i = mptcp_join_entry_hash(skb, net);
/* No use in waiting if other cpu is already using this slot --
* would overwrite the data that got stored.
*/
spin_lock_bh(&join_entry_locks[i]);
mptcp_join_store_state(&join_entries[i], subflow_req);
spin_unlock_bh(&join_entry_locks[i]);
}
/* Called for a cookie-ack with MP_JOIN option present.
* Look up the saved state based on skb hash & check token matches msk
* in same netns.
*
* Caller will check msk can still accept another subflow. The hmac
* present in the cookie ACK mptcp option space will be checked later.
*/
bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req,
struct sk_buff *skb)
{
struct net *net = read_pnet(&subflow_req->sk.req.ireq_net);
u32 i = mptcp_join_entry_hash(skb, net);
struct mptcp_sock *msk;
struct join_entry *e;
e = &join_entries[i];
spin_lock_bh(&join_entry_locks[i]);
if (e->valid == 0) {
spin_unlock_bh(&join_entry_locks[i]);
return false;
}
e->valid = 0;
msk = mptcp_token_get_sock(net, e->token);
if (!msk) {
spin_unlock_bh(&join_entry_locks[i]);
return false;
}
subflow_req->remote_nonce = e->remote_nonce;
subflow_req->local_nonce = e->local_nonce;
subflow_req->backup = e->backup;
subflow_req->remote_id = e->join_id;
subflow_req->token = e->token;
subflow_req->msk = msk;
spin_unlock_bh(&join_entry_locks[i]);
return true;
}
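/* Boot-time initialization of the per-slot locks. */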
void __init mptcp_join_cookie_init(void)
{
int i;
for (i = 0; i < COOKIE_JOIN_SLOTS; i++)
spin_lock_init(&join_entry_locks[i]);
}
| linux-master | net/mptcp/syncookies.c |